markovchain/0000755000176200001440000000000015140042215012546 5ustar liggesusersmarkovchain/tests/0000755000176200001440000000000015137702633013725 5ustar liggesusersmarkovchain/tests/testthat/0000755000176200001440000000000015140042215015550 5ustar liggesusersmarkovchain/tests/testthat/testFits.R0000644000176200001440000000165215137702633017521 0ustar liggesuserscontext("Checking that fitting works") #load & prepare data data(rain) data(holson) myHolson<-as.matrix(holson[,-1]); rownames(myHolson)<-holson$id test_that("Check createSequenceMatrix", { expect_equal(createSequenceMatrix(rain$rain), checksAlofiRawTransitions) }) #data preparation ciao<-c("a","a","b","b","a",NA,"b","a","b","a","a") test_that("Check markovchainFit & listFit", { expect_equal(markovchainFit(ciao), simpleMcCiaoFit) expect_equal(markovchainListFit(data=myHolson), checkmarkovchainFitList) }) #### tests for noofVisitsDist function transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") answer <- c(0.348148, 0.651852) names(answer) <- c("a","b") test_that("Check noofVisitsDist works", { expect_equal(noofVisitsDist(simpleMc,5,"a"),answer) }) markovchain/tests/testthat/testPeriod.R0000644000176200001440000000302615137702633020033 0ustar liggesusers# Period examples from http://www.math.wisc.edu/~anderson/605F11/Notes/StochBioChapter3.pdf #library(markovchain) mcPeriodic<-new("markovchain", states=c("0","1","2"), transitionMatrix= matrix(c(0,1,0, 0,0,1, 1,0,0),nrow=3, byrow=TRUE, dimnames=list(c("0","1","2"), c("0","1","2")) )) myMatr<-matrix(c(0 , 0 , 1/2 , 1/4 , 1/4 , 0 , 0, 0 , 0 , 1/3 , 0 , 2/3, 0 , 0 , 0 , 0 , 0 , 0 , 0 , 1/3 , 2/3 , 0 , 0 , 0 , 0 , 0 , 1/2 , 1/2 , 0 , 0 , 0 , 0 , 0 , 3/4 , 1/4 , 1/2, 1/2 , 0 , 0 , 0 , 0 , 0 , 1/4, 3/4 , 0 , 0 , 0 , 0 , 0),byrow=TRUE, nrow = 7) mcPeriodic2<-as(myMatr, "markovchain") mcAperiodic<-new("markovchain", states=c("0","1","2","3","4"), transitionMatrix= 
matrix(c(1/2,1/2,0,0,0, 1/2,0,1/2,0,0, 0,1/2,0,1/2,0, 0,0,1/2,0,1/2, 0,0,0,1,0),nrow=5, byrow=TRUE, dimnames=list(c("0","1","2","3","4"), c("0","1","2","3","4")) )) ####end creating DTMC context("Basic DTMC proprieties") test_that("States are those that should be", { expect_equal(period(mcPeriodic),3) expect_equal(period(mcPeriodic2),3) expect_equal(period(mcAperiodic),1) })markovchain/tests/testthat/testBasic1.R0000644000176200001440000004471715137702633017727 0ustar liggesusers#library(markovchain) #create basic markov chains markov1<-new("markovchain", states=c("a","b","c"), transitionMatrix= matrix(c(0.2,0.5,0.3, 0,1,0, 0.1,0.8,0.1),nrow=3, byrow=TRUE, dimnames=list(c("a","b","c"), c("a","b","c")) )) mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ####end creating DTMC context("Basic DTMC proprieties") test_that("States are those that should be", { expect_equal(absorbingStates(markov1), "b") expect_equal(transientStates(markov1), c("a","c")) expect_equal(is.irreducible(mathematicaMc),FALSE) expect_equal(transientStates(mathematicaMc), c("a","b")) expect_equal(is.accessible(mathematicaMc, "a", "c"),TRUE) expect_equal(canonicForm(mathematicaMc)@transitionMatrix, markovchain:::.canonicFormRcpp(mathematicaMc)@transitionMatrix) expect_equal(recurrentClasses(mathematicaMc), list(c("c", "d"), c("e"))) expect_equal(summary(mathematicaMc), list(closedClasses = list(c("c", "d"), c("e")), recurrentClasses = list(c("c", "d"), c("e")), transientClasses = list(c("a", "b")))) }) ###testing proper conversion of objects context("Conversion of objects") provaMatr2Mc<-as(mathematicaMatr,"markovchain") test_that("Conversion of 
objects", { expect_equal(class(provaMatr2Mc)=="markovchain",TRUE) }) ### Markovchain Fitting sequence1 <- c("a", "b", "a", "a", NA, "a", "a", NA) sequence2 <- c(NA, "a", "b", NA, "a", "a", "a", NA, "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a", NA) mcFit <- markovchainFit(data = sequence1, byrow = FALSE, sanitize = TRUE) mcFit2 <- markovchainFit(c("a","b","a","b"), sanitize = TRUE) test_that("Fit should satisfy", { expect_equal((mcFit["logLikelihood"])[[1]], log(1/3) + 2*log(2/3)) expect_equal(markovchainFit(data = sequence2, method = "bootstrap")["confidenceInterval"] [[1]]["confidenceLevel"][[1]], 0.95) expect_equal(mcFit2$upperEndpointMatrix, matrix(c(0,1,1,0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) }) ### Markovchain Fitting for bigger markov chain bigseq <- rep(c("a", "b", "c"), 500000) bigmcFit <- markovchainFit(bigseq) test_that("MC Fit for large sequence 1", { expect_equal(bigmcFit$logLikelihood, 0) expect_equal(bigmcFit$confidenceLevel, 0.95) expect_equal(bigmcFit$estimate@transitionMatrix, bigmcFit$upperEndpointMatrix) }) bigmcFit <- markovchainFit(bigseq, sanitize = TRUE) test_that("MC Fit for large sequence 2", { expect_equal(bigmcFit$logLikelihood, 0) expect_equal(bigmcFit$confidenceLevel, 0.95) expect_equal(bigmcFit$estimate@transitionMatrix, bigmcFit$upperEndpointMatrix) }) ### Markovchain Fitting For dataframe or matrix as an input matseq <- matrix(c("a", "b", "c", NA ,"b", "c"), nrow = 2, byrow = T) # for matrix as input test_that("Markovchain Fit for matrix as input", { # for matrix as input expect_equal(markovchainFit(matseq)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(markovchainFit(matseq, sanitize = TRUE)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) # for data frame as input 
expect_equal(markovchainFit(as.data.frame(matseq))$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(markovchainFit(as.data.frame(matseq), sanitize = TRUE)$estimate@transitionMatrix, matrix(c(0, 1, 0, 0, 0, 1, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) ### Markovchain Fitting(mle) with sanitize parameter mle_sequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") mle_fit1 <- markovchainFit(mle_sequence) mle_fit2 <- markovchainFit(mle_sequence, sanitize = TRUE) test_that("MarkovchainFit MLE", { expect_equal(mle_fit1$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(mle_fit2$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(mle_fit1$logLikelihood, mle_fit2$logLikelihood) expect_equal(mle_fit1$confidenceInterval, mle_fit2$confidenceInterval) expect_equal(mle_fit2$standardError, mle_fit2$standardError) }) ### Markovchain Fitting(laplace) with sanitize parameter lap_sequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") lap_fit1 <- markovchainFit(lap_sequence, "laplace") lap_fit2 <- markovchainFit(lap_sequence, "laplace", sanitize = TRUE) test_that("Markovchain Laplace", { expect_equal(lap_fit1$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(lap_fit2$estimate@transitionMatrix, matrix(c(0.5, 0.5, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(lap_fit1$logLikelihood, 
lap_fit2$logLikelihood) }) ### Markovchain Fitting when some states are not present in the given sequence mix_seq <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") mix_fit1 <- markovchainFit(mix_seq, "mle", sanitize = TRUE, possibleStates = c("d")) mix_fit2 <- markovchainFit(mix_seq, "laplace", sanitize = TRUE, possibleStates = c("d")) mix_fit3 <- markovchainFit(mix_seq, "map", sanitize = TRUE, possibleStates = c("d")) test_that("Mixture of Markovchain Fitting", { expect_equal(mix_fit2$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) expect_equal(mix_fit1$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) expect_equal(mix_fit3$estimate@transitionMatrix, matrix(c(.5, .5, 0, 0, 3/7, 3/7, 1/7, 0, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4, 1/4), nrow = 4, byrow = TRUE, dimnames = list(c("a", "b", "c", "d"), c("a", "b", "c", "d")) ) ) }) ### Test for createSequenceMatrix rsequence <- c("a", "b", NA, "b", "b", "a", "a", "a", "b", "b", NA, "b", "b", "a", "a", "b", "a", "a", "b", "c") test_that("createSequenceMatrix : Permutation of parameters",{ expect_equal(createSequenceMatrix(rsequence, FALSE, FALSE), matrix(c(4, 4, 0, 3, 3, 1, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, FALSE, TRUE), matrix(c(4, 4, 0, 3, 3, 1, 1, 1, 1), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, TRUE, FALSE), matrix(c(4/8, 4/8, 0, 3/7, 3/7, 1/7, 0, 0, 0), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_equal(createSequenceMatrix(rsequence, TRUE, TRUE), 
matrix(c(4/8, 4/8, 0, 3/7, 3/7, 1/7, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) ### Test for createSequenceMatrix : input nx2 matrix data <- matrix(c("a", "a", "b", "a", "b", "a", "b", "a", NA, "a", "a", "a", "a", "b", NA, "b"), ncol = 2, byrow = TRUE) test_that("createSequenceMatrix : input as matrix",{ expect_equal(createSequenceMatrix(data), matrix(c(2, 1, 3, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) expect_equal(createSequenceMatrix(data, toRowProbs = TRUE), matrix(c(2/3, 1/3, 3/3, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b")))) expect_equal(createSequenceMatrix(data, toRowProbs = TRUE, possibleStates = "d", sanitize = TRUE), matrix(c(2/3, 1/3, 0, 1, 0, 0, 1/3, 1/3, 1/3), nrow = 3, byrow = TRUE, dimnames = list(c("a", "b", "d"), c("a", "b", "d")))) }) ### Test for markovchainSequence and rmarkovchain statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) s1 <- markovchainSequence(10, mcB) s2 <- markovchainSequence(10, mcB, include.t0 = TRUE) s3 <- markovchainSequence(10, mcB, t0 = "b", include.t0 = TRUE) s4 <- markovchainSequence(10, mcB, useRCpp = FALSE) s5 <- markovchainSequence(10, mcB, include.t0 = TRUE, useRCpp = FALSE) s6 <- markovchainSequence(10, mcB, t0 = "b", include.t0 = TRUE, useRCpp = FALSE) test_that("Output format of markovchainSequence", { expect_equal(length(s1), 10) expect_equal(length(s2), 11) expect_equal(length(s3), 11) expect_equal(s3[1], "b") expect_equal(length(s4), 10) expect_equal(length(s5), 11) expect_equal(length(s6), 11) expect_equal(s6[1], "b") }) test_that("markovchainSequence validates the requested length", { expect_error(markovchainSequence(NA, mcB), "`n` must be a finite numeric scalar") expect_error(markovchainSequence(1.5, mcB), "`n` must be an integer 
value") expect_error(markovchainSequence(-1, mcB), "`n` must be greater than or equal to 0") }) test_that("markovchainSequence returns zero-length sequences when requested", { expect_equal(length(markovchainSequence(0, mcB)), 0) expect_identical(markovchainSequence(0, mcB, include.t0 = TRUE, t0 = "b"), "b") expect_identical(markovchainSequence(0, mcB, include.t0 = TRUE, t0 = "b", useRCpp = FALSE), "b") }) statesNames <- c("a", "b", "c") mcA <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcC <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) o1 <- rmarkovchain(15, mclist, "list") o2 <- rmarkovchain(15, mclist, "matrix") o3 <- rmarkovchain(15, mclist, "data.frame") o4 <- rmarkovchain(15, mclist, "list", t0 = "a", include.t0 = TRUE) o5 <- rmarkovchain(15, mclist, "matrix", t0 = "a", include.t0 = TRUE) o6 <- rmarkovchain(15, mclist, "data.frame", t0 = "a", include.t0 = TRUE) test_that("Output format of rmarkovchain", { expect_equal(length(o1), 15) expect_equal(length(o1[[1]]), 3) expect_equal(all(dim(o2) == c(15, 3)), TRUE) expect_equal(all(dim(o3) == c(45, 2)), TRUE) expect_equal(length(o4), 15) expect_equal(length(o4[[1]]), 4) expect_equal(o4[[1]][1], "a") expect_equal(all(dim(o5) == c(15, 4)), TRUE) expect_equal(all(o5[, 1] == "a"), TRUE) expect_equal(all(dim(o6) == c(60, 2)), TRUE) }) ### MAP fit function tests data1 <- c("a", "b", "a", "c", "a", "b", "a", "b", "c", "b", "b", "a", "b") data2 <- c("c", "a", "b") test_that("MAP fits must satisfy", { 
expect_identical(markovchainFit(data1, method = "map")$estimate@transitionMatrix, markovchainFit(data1, method = "mle")$estimate@transitionMatrix) expect_identical(markovchainFit(data1, method = "map")$estimate@transitionMatrix, matrix(c(0.0, 0.6, 0.5, 0.8, 0.2, 0.5, 0.2, 0.2, 0.0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_identical(markovchainFit(data1, method = "map", hyperparam = matrix(c(2, 1, 3, 4, 5, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c"))))$estimate@transitionMatrix, matrix(c(1/10, 3/10, 3/5, 7/10, 5/10, 2/5, 2/10, 2/10, 0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) test_that("predictiveDistribution must satisfy", { expect_equal(predictiveDistribution(data1, character()), 0) expect_equal(predictiveDistribution(data1, data2, hyperparam = matrix(c(2, 1, 3, 4, 5, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))), log(4 / 13)) }) test_that("inferHyperparam must satisfy", { expect_identical(inferHyperparam(data = data1)$dataInference, matrix(c(1, 4, 2, 5, 2, 2, 2, 2, 1), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) expect_identical(inferHyperparam(transMatr = matrix(c(0.0, 0.6, 0.5, 0.8, 0.2, 0.5, 0.2, 0.2, 0.0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c"))), scale = c(10, 10, 10))$scaledInference, matrix(c(0, 6, 5, 8, 2, 5, 2, 2, 0), nrow = 3, dimnames = list(c("a", "b", "c"), c("a", "b", "c")))) }) pDRes <- c(log(3/2), log(3/2)) names(pDRes) <- c("a", "b") test_that("priorDistribution must sastisfy", { expect_equal(priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), matrix(c(2, 2, 2, 2), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))), pDRes) }) energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, 
byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("steadyStates must satisfy", { expect_identical(steadyStates(molecularCTMC), matrix(c(1/4, 3/4), nrow = 1, dimnames = list(c(), energyStates))) }) ### Tests for expectedRewards function ### Examples taken from Stochastic Processes: Theory for Applications, Robert G. Gallager,Cambridge University Press transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) test_that("expectedRewards must satisfy", { expect_equal(expectedRewards(simpleMc,1,c(0,1)),c(0.01,1.99)) expect_equal(expectedRewards(simpleMc,2,c(0,1)),c(0.0298,2.9702)) }) ### Tests for committorAB function transMatr <- matrix(c(0,0,0,1,0.5, 0.5,0,0,0,0, 0.5,0,0,0,0, 0,0.2,0.4,0,0, 0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") answer <- c(0.444,0.889,0.000,0.444,1.000) names <- c("a","b","c","d","e") names(answer) <- names test_that("committorAB must satisfy", { expect_equal(round(committorAB(object,c(5),c(3)),3),answer) }) ### Tests for firstPassageMultiple function statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) answer <- matrix(c(.8000, 0.6000, 0.2540 ),nrow = 3,dimnames = list(c("1","2","3"),"set")) test_that("firstPassageMultiple function satisfies", { expect_equal(firstPassageMultiple(testmarkov,"a",c("b","c"),3),answer) }) markovchain/tests/testthat/testMultinomCI.R0000644000176200001440000000327015137702633020632 0ustar liggesusers#library(markovchain) seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcfit<-markovchainFit(data=seq,byrow=TRUE) # print(mcfit) seqmat<-createSequenceMatrix(seq) seqmat mCI <- 
.multinomialCIRcpp(mcfit$estimate@transitionMatrix, seqmat, 0.95) # print(mCI) ####end of creating multinomialCI context("Multinomial confidence interval") test_that("multinomial CI statisfay", { # expect_equal(mCI$lowerEndpointMatrix, matrix(c(0.2222222,0.3333333, # 0.5714286,0.1428571),nrow=2, byrow=TRUE, dimnames=list(c("a","b"), # c("a","b")) # )) # expect_equal(mCI$upperEndpointMatrix, matrix(c(0.8111456,0.9222567, # 1,0.6839473),nrow=2, byrow=TRUE, dimnames=list(c("a","b"), # c("a","b")) # )) expect_equal(mCI$upperEndpointMatrix[2,1],1) }) # Multinomial distribution with 3 classes, from which 79 samples # were drawn: 23 of them belong to the first class, 12 to the # second class and 44 to the third class. Punctual estimations # of the probabilities from this sample would be 23/79, 12/79 # and 44/79 but we want to build 95% simultaneous confidence intervals # for the true probabilities # m = multinomialCI(c(23,12,44), 0.05) # print(paste("First class: [", m[1,1], m[1,2], "]")) # print(paste("Second class: [", m[2,1], m[2,2], "]")) # print(paste("Third class: [", m[3,1], m[3,2], "]")) # seq<-c(4, 5) # m = multinomialCI(seq, 0.05) # m markovchain/tests/testthat/testStatesClassification.R0000644000176200001440000000430215137702633022726 0ustar liggesuserscontext("Classification of states") A <- structure(c(0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0.5, 0.3, 0.3, 0, 0, 0, 0, 0, 0.5, 0.7, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.3, 0.4, 0, 0, 0, 0, 0, 0.4, 0, 0.5, 0, 0, 0, 0, 0, 0.6, 0.7, 0, 0, 0, 0, 0, 0, 0, 0, 0.1, 1), .Dim = c(8L, 8L), .Dimnames = list( c("1", "2", "3", "4", "5", "6", "7", "8"), c("1", "2", "3", "4", "5", "6", "7", "8"))) mchain <- new("markovchain", transitionMatrix=A) #summary(mchain) test_that("States are those that should be", { expect_equal(recurrentClasses(mchain), list(c("3", "4"), c("8"))) expect_equal(transientStates(mchain), c("1", "2", "5", "6", "7")) expect_equal(absorbingStates(mchain), "8") }) 
#https://www.math.ucdavis.edu/~gravner/MAT135B/materials/ch13.pdf mcMatr1<-markovchain:::zeros(3) mcMatr1[1,]<-c(0.5,0.5,0) mcMatr1[2,]<-c(0.5,0.25,0.25) mcMatr1[3,]<-c(0,1/3,2/3) mc1<-as(mcMatr1,"markovchain") test_that("States are those that should be", { expect_equal(is.irreducible(mc1),TRUE) }) mcMatr2<-matrix(c(0, 0, 1/2, 1/2,1, 0 ,0, 0,0, 1, 0, 0,0, 1, 0, 0),ncol = 4,byrow=TRUE) mc2<-as(mcMatr2,"markovchain") test_that("States are those that should be", { expect_equal(recurrentClasses(mc2),list(c("s1","s2","s3","s4"))) }) mcMatr3<-matrix(c( 0,1,0,0,0,0, 0.4,0.6,0,0,0,0, 0.3,0,0.4,0.2,0.1,0, 0,0,0,0.3,0.7,0, 0,0,0,0.5,0,0.5, 0,0,0,0.3,0,0.7),nrow = 6,byrow=TRUE) mc3<-as(mcMatr3,"markovchain") recurrentClasses(mc3) transientStates(mc3) #canonicForm(mc3) test_that("States are those that should be", { expect_equal(recurrentClasses(mc3),list(c("s1","s2"),c("s4","s5","s6") )) expect_equal(transientStates(mc3),"s3") }) mcMatr4<-markovchain:::zeros(5) mcMatr4[1:2,1:2]<-0.5*markovchain:::ones(2) mcMatr4[5,1]<-1 mcMatr4[3,3]<-1 mcMatr4[4,3:4]<-0.5 mc4<-as(mcMatr4,"markovchain") test_that("States are those that should be", { expect_equal(recurrentClasses(mc4),list(c("s1","s2"),c("s3"))) expect_equal(absorbingStates(mc4),"s3") expect_equal(transientStates(mc4),c("s4","s5")) } ) markovchain/tests/testthat/testctmc.R0000644000176200001440000001065515137702633017545 0ustar liggesuserslibrary(markovchain) context("Checking that ExpectedTime function works as expected; it depends on ctmcd") # Example from the book Markovchains, J. R. 
Norris, Cambridge University Press states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") test_that("Check Expected hitting time from one state to another",{ # Skip the test if the ctmcd package is not available if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(ExpectedTime(ctmc,1,4),7) expect_equal(ExpectedTime(ctmc,2,4),5.5) }) context("Checking that probabilityatT function works as expected") # TESTS for probabilityatT function # Example taken from the book INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley states <- c("a","b","c","d","e") # taken exactly from book ansMatrix <- matrix(data = c(0.610, 0.290, 0.081, 0.016, 0.003, 0.232, 0.443, 0.238, 0.071, 0.017, 0.052, 0.190, 0.435, 0.238, 0.085, 0.008, 0.045, 0.191, 0.446, 0.310, 0.001, 0.008, 0.054, 0.248, 0.688),nrow = 5,byrow = T,dimnames = list(states,states)) byRow <- TRUE gen <- matrix(c(-1/4,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-9/20,1/4,0,0,0,1/5,-1/5), nrow=5,byrow=byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") test_that("Check probabilityatT using a ctmc object:",{ if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(round(probabilityatT(ctmc,2.5),3),ansMatrix) }) ### Adds tests for impreciseprobabilityatT function context("Checking that impreciseprobabilityatT function works as expected:") states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = T,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) test_that("Check 
impreciseProbabilityatT function using an ictmc object:",{ if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(round(impreciseProbabilityatT(ictmc,2,0,1,error = 10^-3),4),c(0.0083,0.1410)) }) ### Adds tests for freq2Generator function sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) answer <- matrix(c( -0.024, 0.015, 0.009, 0, 0.007, -0.018, 0.012, 0, 0.013 , 0.007, -0.021, 0, 0.000, 0.000, 0.000, 0 ),nrow = 4,byrow = TRUE) test_that("Check if ",{ if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(round(freq2Generator(sample_rel,1),3),answer) }) ### tests for is.CTMCirreducible fcuntion energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("is.CTMCirreducible works", { if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(is.CTMCirreducible(molecularCTMC),TRUE) }) ### tests for is.TimeReversible function energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") test_that("is.TimeReversible works", { if (!requireNamespace("ctmcd", quietly = TRUE)) { skip("The ctmcd package is not available") } expect_equal(is.TimeReversible(molecularCTMC),TRUE) }) markovchain/tests/testthat/test-sparse.R0000644000176200001440000000016615137702633020165 0ustar liggesuserstest_that("sparse transition matrix is accepted", { 
expect_is(as(sparsematrix, "markovchain"), "markovchain") }) markovchain/tests/testthat.R0000644000176200001440000000010415137702633015703 0ustar liggesuserslibrary(testthat) library(markovchain) test_check("markovchain") markovchain/MD50000644000176200001440000001750015140042215013061 0ustar liggesuserse07782eca67d57aa382adb29d88a7bf4 *DESCRIPTION 68857cf199687465e7a83880494dca6d *LICENSE 3ab103442e49de45ffeee91e4f7294bf *NAMESPACE d0352e51a472e0c2cd6f93171635e6cf *NEWS.md 984f3d39a96d96f00d0ce5411f21ed17 *R/RcppExports.R ab6b6527178986dc26f25ce8a963ca43 *R/classesAndMethods.R de2bd558f0b4120acaaf8872ca2299f2 *R/ctmcClassesAndMethods.R 6ccb09d5d44f9d3de7de3cd22005b2aa *R/ctmcProbabilistic.R 8c7ce890d00d92ce802b378a2bcf1dfd *R/data.R a99314ea622a69a8e36076b40bc5fbdd *R/fitHigherOrder.R 48abce8ef8070cce01549b1ac4b3b177 *R/fittingFunctions.R c0b6439b5ec5882b173a83de5013c43d *R/hommc.R 66c52b6220d1662e4e8ce3b10c3cc023 *R/markovchain.R 2fa8aca8212bfacdd1524bd97f3c4e5c *R/matlab_package_functions.R c173f5676e42f00709b9423416bfd27e *R/probabilistic.R 8c90c159a9489b0f5dfb8ce08e0495e8 *R/random.R 41fc8f8e9367fd173a69c2f7feebfc3a *R/statisticalTests.R 993a960ae22cf15eee4f42b12007acf1 *R/supplementaryPlot.R 3896f7def4ba0c02fda94817b442116d *R/sysdata.rda 39b735feac8bc2ead976b8f9d61db649 *R/utils.R db74c50b2544eddf346897e1a7a1e11d *R/zzz.R c248ed4f77e9159cf733ae0b2280027a *README.md 796188af0c7373a177c5ea59a4e5d6d2 *build/vignette.rds c14c6c7a873710afa28c2d02c9f3a2bf *data/blanden.rda 56e85b9432521573f94c62d9ab6719ce *data/craigsendi.rda e6cacc2a56553672127158ecfa03e928 *data/holson.rda 091792dced35e1bb1a85ae9951eed255 *data/kullback.rda 0e809da63049c577a4206fdd9c493d26 *data/preproglucacon.rda 717b5b6bc2cdc075c07145d829086e5d *data/rain.rda 22c81e8efd8818eb028d2c29c5dacc8b *data/sales.rda 229b5f800fb4a05c181c0255b176eaee *data/tm_abs.rda 7062e44a453478115862d4383d21b1aa *demo/00Index eb83a730ed1c1eba855090a0d69dfadc *demo/bard.R a1df0201d5d4857c191652b6bd3cbb3d 
*demo/computationTime.R c6846ab0f58e3101e2883adb07f10f45 *demo/examples.R 1bda9a00a349a96075bf3c62e9ba6cb1 *demo/extractMatrices.R 89142f6fd07792388e74fd69002dffeb *demo/mathematica.R 837a7db4def223b7ba17ba93f52039f5 *demo/quasiBayesian.R 913db4680e94bd31babce7f761c459be *demo/quickStart.R e311d6dabce00a3de764cf58d8698a9a *demo/reliability.R ab9ed3fb5bf0e27922c5f0a0ae540c72 *inst/CITATION aa940addbddf7d9ae7bd9bb17cc61f2c *inst/doc/an_introduction_to_markovchain_package.R 188e3e0fc06b0132d66029180117be7b *inst/doc/an_introduction_to_markovchain_package.Rmd fa026a5d075f465ce53312f956c7e10c *inst/doc/an_introduction_to_markovchain_package.pdf c2bedc94f73fce561113feaf3ce0f0e2 *inst/doc/gsoc_2017_additions.R dce63235269c77778c9417678e262b8f *inst/doc/gsoc_2017_additions.Rmd fecdd2c33c9de09130afd07f6687a898 *inst/doc/gsoc_2017_additions.html c54bccab235c1b2ddb7b002273cb6700 *inst/doc/higher_order_markov_chains.R 875c852572812e1c9b0124da2f1a7907 *inst/doc/higher_order_markov_chains.Rmd 2b91c0e212d13bab2738c4d8b37c7330 *inst/doc/higher_order_markov_chains.html 730fbb92cced04056c80f883f3566252 *inst/extdata/ltdItaData.txt 75bf84bd1d2c1ad2c18b97199443d27f *man/ExpectedTime.Rd 3a7587d2c12ba2a1652ad7e38cc69443 *man/HigherOrderMarkovChain-class.Rd 2207974462e269879736e2aa4f45f3bf *man/absorptionProbabilities.Rd 984657ac88a51f09cd8013b2ad5ccf81 *man/blanden.Rd 515e8fd8bfffcfe1cf5349200a7682a6 *man/committorAB.Rd 6a4eb256b4f993212a37f877712edf59 *man/conditionalDistribution.Rd cd4830df7e3f9ab722d4edb724aa9c1e *man/craigsendi.Rd 84d44f1e1dae8840ee345b2368ebede6 *man/ctmc-class.Rd 7b1edcd40aab0fa00c37a2b8b0a32588 *man/ctmcFit.Rd 1a601c7098ade1f736a926f7e33bb90b *man/expectedRewards.Rd 13ef966850f4dae14aaa7221b1181720 *man/expectedRewardsBeforeHittingA.Rd aff4c6974caafc84c7d410d6d706ad62 *man/firstPassage.Rd bbaac4bed94a47e88deacbf976b86e9e *man/firstPassageMultiple.Rd d2c9c7016cb65119ae51207965beac19 *man/fitHighOrderMultivarMC.Rd 24b9ff5bd194750b8bc5da4f66775cd7 
*man/fitHigherOrder.Rd 11f3ca6a0117afde424469092c385e59 *man/freq2Generator.Rd dad43a04be77e9a9b7ae1d3a9fb7a495 *man/generatorToTransitionMatrix.Rd c9840a546f9357451f76ca09b23a0bd8 *man/getName.Rd cf61b91702f4f73d44e9fd5608ca4d65 *man/hittingProbabilities.Rd add6fde771cc5bf6c02827ea3536fc96 *man/holson.Rd a46092594d426a3bedb4bd1782da2da2 *man/hommc-class.Rd 297096be86b88ebeaabd6ee7ed0a6486 *man/hommc-show.Rd 10dbf12cf0346f0054d567cd09fb65ff *man/ictmc-class.Rd 592eb7fba6781dfdcee2d2dff82df000 *man/impreciseProbabilityatT.Rd eb6b353a91450ff5674cb25090d46d70 *man/inferHyperparam.Rd db14356077967b114596ac92021a8a20 *man/is.CTMCirreducible.Rd 61cd70542034adea9588515a291dc19c *man/is.TimeReversible.Rd 4dceef72c917d3e66e97adfd880435c9 *man/is.accessible.Rd 93a3d28c78d94b800997dc44077bd480 *man/is.irreducible.Rd df3496a2d6e96dd76409386c6d3f40f0 *man/is.regular.Rd 44c9b0250b8fd6a2e46bcd0a8545f59b *man/kullback.Rd c51527c9bc308340e1cd43be60561e83 *man/markovchain-class.Rd 55422152b03801e04f936eb75090afc2 *man/markovchain-package.Rd 1f9c346ec9e424d384535c24540a9079 *man/markovchainFit.Rd c3c1420c4aec6d3ee30ff46f68756329 *man/markovchainList-class.Rd 16c1cbb7705b17ebb518f1106583dfa2 *man/markovchainListFit.Rd 9be58105d48ce7cbefa8ad420a47f2c0 *man/markovchainSequence.Rd abbaca484a9abb47564201cf4dd69834 *man/meanAbsorptionTime.Rd a26bc397e9698f97f458b3af618a88b3 *man/meanFirstPassageTime.Rd 3f11047d088c7d3e5848c6dca4c02493 *man/meanNumVisits.Rd a51431e1948ef165709ff5329b469948 *man/meanRecurrenceTime.Rd 2f53a77e485cefec6a31883806563a7e *man/multinomialConfidenceIntervals.Rd 5fa42ddb7efca956e8e6c354bced7302 *man/names.Rd c7d4ada0e1a238d53ec005b32d93515f *man/noofVisitsDist.Rd 0100107bf1240600eeb5113b53dde8f5 *man/ones.Rd f371b228dbd222f85535f1d4c9219b43 *man/predictHommc.Rd fc9bb6a67bee60dda7b4dad14bf7f3ca *man/predictiveDistribution.Rd 29a6c0777531665ad64fc774c6e87e25 *man/preproglucacon.Rd d1755ab27b558f63b9e740af441f7777 *man/priorDistribution.Rd 
80fbd5430cc4b8d68d031b1424b62a75 *man/probabilityatT.Rd 4817c2004dc1ac01c19cc8f9a3212b45 *man/rain.Rd 9d4ee477a6139b177ed5d218b12e7fb3 *man/rctmc.Rd 954bc6c4ee8923a7ecec4928f121e9c1 *man/rmarkovchain.Rd 0c8479de816d083d212b802a03c6e802 *man/sales.Rd 4ba99b5946a10b828a943aeedf40e50a *man/setName.Rd be0b6000cb92dc3757c227737ebcef4c *man/states.Rd a28a8b97f9354cb3bf08733359978209 *man/statisticalTests.Rd 3825bbb52da090aae367cac4de6f13f0 *man/steadyStates.Rd 84702a1a7cdd752c4d65bdd9b2abb26c *man/structuralAnalysis.Rd cc0565a34593f573e482e92783e6cd27 *man/tm_abs.Rd b60ba3fe4b6b18d4e99878b7b259562a *man/transition2Generator.Rd 19d5e66645633e19a90cd83e3049b0c9 *man/transitionProbability.Rd d4e1724cc0091b2ddf22097d1f1b2075 *man/zeros.Rd 7a3aeb1aa851dfb26ed4b62a9db06cec *src/Makevars 46afad93db0f8369499180b529bbac9a *src/Makevars.win 4c8bf0a3e340b7aa84647f337918e08b *src/RcppExports.cpp 4b77a04f31755a0fc89baed5d50c8aca *src/classesAndMethods.cpp e28005d65e2b51dac4973cb40de229b5 *src/ctmcClassesAndMethods.cpp 100a776c9e36949f4bc77fa3467ba1b6 *src/ctmcFittingFunctions.cpp 02ce3f3715895ea5a19302312ea0eabf *src/ctmcProbabilistic.cpp d99e875a963f6fbaf1734b3470ca67fb *src/fitHigherOrder.cpp cbcacafb2fd1c03fe8899f1d64706df7 *src/fittingFunctions.cpp 8c9f0905b63553383414ccbbf6dfbea5 *src/helpers.h 7473949d18125aa932e8c3a89da4230b *src/mapFitFunctions.h 671ba94f3c19dc66524665fd56d2b82c *src/multinomCI.cpp 693f21d800e439415365d7d75ca318a5 *src/probabilistic.cpp 8118d2d4fb74432e8cee34693663e394 *src/utils.cpp 9ca86bdb9770b1d4f09faaccd4b207c0 *tests/testthat.R 69833e484d922ca478fddbf26d0ecf44 *tests/testthat/test-sparse.R 7e3916fd37c9d2425d81a80ff56b1dad *tests/testthat/testBasic1.R 95866d62c44f628264aac082f16f8562 *tests/testthat/testFits.R c6b1cc046d56e93de12430fa159cca00 *tests/testthat/testMultinomCI.R 76426c64e1f40feec3af8d9efe2b77fc *tests/testthat/testPeriod.R 9fda6f42d517e727c4a080c8a1fe8baf *tests/testthat/testStatesClassification.R c84a4418ecfd4c94fb810586787b3d40 
*tests/testthat/testctmc.R 188e3e0fc06b0132d66029180117be7b *vignettes/an_introduction_to_markovchain_package.Rmd dce63235269c77778c9417678e262b8f *vignettes/gsoc_2017_additions.Rmd 875c852572812e1c9b0124da2f1a7907 *vignettes/higher_order_markov_chains.Rmd 2c61f3e82b842145f0bc5018de21d1e2 *vignettes/markovchainBiblio.bib 057d08248e65d85ee90839f9c2e6ba4f *vignettes/template.tex markovchain/R/0000755000176200001440000000000015137710161012757 5ustar liggesusersmarkovchain/R/ctmcProbabilistic.R0000644000176200001440000005154715137702633016560 0ustar liggesusers#' @title rctmc #' #' @description The function generates random CTMC transitions as per the #' provided generator matrix. #' @usage rctmc(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, #' out.type = "list") #' #' @param n The number of samples to generate. #' @param ctmc The CTMC S4 object. #' @param initDist The initial distribution of states. #' @param T The time up to which the simulation runs (all transitions after time #' T are not returned). #' @param include.T0 Flag to determine if start state is to be included. #' @param out.type "list" or "df" #' #' @details In order to use the T0 argument, set n to Inf. #' @return Based on out.type, a list or a data frame is returned. The returned #' list has two elements - a character vector (states) and a numeric vector #' (indicating time of transitions). The data frame is similarly structured. #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences #' (2013), David F. 
Anderson, University of Wisconsin at Madison #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{generatorToTransitionMatrix}},\code{\link{ctmc-class}} #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' #' statesDist <- c(0.8, 0.2) #' rctmc(n = Inf, ctmc = molecularCTMC, T = 1) #' rctmc(n = 5, ctmc = molecularCTMC, initDist = statesDist, include.T0 = FALSE) #' @export rctmc <- function(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, out.type = "list") { if (identical(initDist, numeric())) state <- sample(ctmc@states, 1) # sample state randomly else if (length(initDist) != dim(ctmc) | round(sum(initDist), 5) != 1) stop("Error! Provide a valid initial state probability distribution") else state <- sample(ctmc@states, 1, prob = initDist) # if valid probability distribution, # sample accordingly # obtain transition probability matrix from the generator matrix trans <- generatorToTransitionMatrix(ctmc@generator) states <- c() time <- c() if (include.T0 == TRUE){ states <- c(states, state) time <- c(time, 0) } t <- 0 i <- 1 while (i <= n){ idx <- which(ctmc@states == state) if (ctmc@generator[idx, idx] == 0) { # absorbing state; stay here forever t <- Inf } else { t <- t + rexp(1, -ctmc@generator[idx, idx]) } if((T > 0 & t > T) | (is.infinite(t))) break state <- ctmc@states[sample(1:dim(ctmc), 1, prob = trans[idx, ])] states <- c(states, state) time <- c(time, t) i <- i + 1 } out <- list(states, time) if (out.type == "list") return(out) else if(out.type == "df"){ df <- data.frame(matrix(unlist(out), nrow = length(states))) names(df) <- c("states", "time") return(df) } else stop("Not a valid output type") } #' @title Return the generator matrix for a corresponding transition matrix #' #' 
@description Calculate the generator matrix for a #' corresponding transition matrix #' #' @param P transition matrix between time 0 and t #' @param t time of observation #' @param method "logarithm" returns the Matrix logarithm of the transition matrix #' #' @return A matrix that represent the generator of P #' @export #' #' @examples #' mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE) #' Q <- transition2Generator(P = mymatr) #' expm::expm(Q) #' #' @seealso \code{\link{rctmc}} transition2Generator<-function(P, t = 1,method = "logarithm") { if (method == "logarithm") { Q = logm(P)/t } #else return(Q) } #' Returns a generator matrix corresponding to frequency matrix #' #' @description The function provides interface to calculate generator matrix corresponding to #' a frequency matrix and time taken #' #' @param P relative frequency matrix #' @param t (default value = 1) #' @param method one among "QO"(Quasi optimaisation), "WA"(weighted adjustment), "DA"(diagonal adjustment) #' @param logmethod method for computation of matrx algorithm (by default : Eigen) #' #' @return returns a generator matix with same dimnames #' #' @references E. Kreinin and M. Sidelnikova: Regularization Algorithms for #' Transition Matrices. 
Algo Research Quarterly 4(1):23-40, 2001 #' #' @examples #' sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) #' sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) #' freq2Generator(sample_rel,1) #' #' data(tm_abs) #' tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1)) #' ## Derive quasi optimization generator matrix estimate #' freq2Generator(tm_rel,1) #' #' @export #' freq2Generator <- function(P,t = 1,method = "QO",logmethod = "Eigen"){ if(requireNamespace('ctmcd', quietly = TRUE)) { if(method == "QO"){ out <- ctmcd::gmQO(P, t, logmethod) } else if(method == "WA") { out <- ctmcd::gmWA(P, t, logmethod) } else if(method == "DA") { out <- ctmcd::gmDA(P, t, logmethod) } } else { warning("package ctmcd is not installed") out = NULL } return(out) } #' @title Returns expected hitting time from state i to state j #' #' @description Returns expected hitting time from state i to state j #' #' @usage ExpectedTime(C,i,j,useRCpp) #' #' @param C A CTMC S4 object #' @param i Initial state i #' @param j Final state j #' @param useRCpp logical whether to use Rcpp #' #' @details According to the theorem, holding times for all states except j should be greater than 0. #' #' @return A numerical value that returns expected hitting times from i to j #' #' @references Markovchains, J. R. 
Norris, Cambridge University Press #' #' @author Vandit Jain #' #' @examples #' states <- c("a","b","c","d") #' byRow <- TRUE #' gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), #' nrow = 4,byrow = byRow, dimnames = list(states,states)) #' ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") #' ExpectedTime(ctmc,1,4,TRUE) #' #' @export ExpectedTime <- function(C,i,j,useRCpp = TRUE){ # take generator from ctmc-class object Q <- C@generator # in case where generator is written column wise if(C@byrow==FALSE){ Q <- t(Q) } NoofStates <- dim(C) Exceptj <- c(1:NoofStates) # create vector with all values from 1:NoofStates except j Exceptj <- which(Exceptj!=j) # build matrix with vlaues from Q such that row!=j or column!=j Q_Exceptj <- Q[Exceptj,Exceptj] # check for positivity of holding times except for state j if(!all(diag(Q_Exceptj)!=0)){ stop("Holding times for all states except j should be greater than 0") } # get b for solving the system of linear equation Ax = b where A is Q_Exceptj b <- rep(-1,dim(Q_Exceptj)[1]) # use solve function from base packge to solve Ax = b if(useRCpp == TRUE){ out <- .ExpectedTimeRCpp(Q_Exceptj,b) } else { out <- solve(Q_Exceptj,b) } # out will be of size NoofStates-1, hence the adjustment for different cases of i>= NoofStates || x0 < 1){ stop("Initial state provided is not correct") } return(P[x0,]) } } #' Calculating full conditional probability using lower rate transition matrix #' #' This function calculates full conditional probability at given #' time s using lower rate transition matrix #' #' @usage impreciseProbabilityatT(C,i,t,s,error,useRCpp) #' #' @param C a ictmc class object #' @param i initial state at time t #' @param t initial time t. Default value = 0 #' @param s final time #' @param error error rate. 
Default value = 0.001 #' @param useRCpp logical whether to use RCpp implementation; by default TRUE #' #' @references Imprecise Continuous-Time Markov Chains, Thomas Krak et al., 2016 #' #' @author Vandit Jain #' #' @examples #' states <- c("n","y") #' Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) #' range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) #' name <- "testictmc" #' ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) #' impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) #' @export impreciseProbabilityatT <- function(C, i, t=0, s, error = 10^-3, useRCpp = TRUE){ ## input validity checking if(s <= t){ stop("Please provide time points such that initial time is greater than or equal to end point") } if(!is(C,'ictmc')){ stop("Please provide a valid ictmc-class object") } noOfstates <-length(C@states) if(i <= 0 || i > noOfstates){ stop("Please provide a valid initial state") } ### validity checking ends if(useRCpp == TRUE) { Qgx <- .impreciseProbabilityatTRCpp(C,i,t,s,error) } else { ## extract values from ictmc object Q <- C@Q range <- C@range ### calculate ||QI_i|| #initialise Q norm value QNorm <- -1 for(i in 1:noOfstates){ sum <- 0 for(j in 1:noOfstates){ sum <- sum + abs(Q[i,j]) } QNorm <- max(sum*range[i,2],QNorm) } ### calculate no of iterations # The 1 is for norm of I_s i.e. 
||I_s|| which equals 1 n <- max((s-t)*QNorm, (s-t)*(s-t)*QNorm*QNorm*1/(2*error)) ### calculate delta delta <- (s-t)/n ### build I_i vector Ii <- rep(0, noOfstates) Ii[i] <- 1 ### calculate value of lower operator _QI_i(x) for all x belongs to no ofStates values <- Q%*%Ii Qgx <- rep(0, noOfstates) for(i in 1:noOfstates){ Qgx[i] <- min(values[i]*range[i,1], values[i]*range[i,2]) } Qgx <- delta*Qgx Qgx <- Ii + Qgx for(iter in 1:n-1){ temp <- Qgx values <- Q%*%Qgx for(i in 1:noOfstates){ Qgx[i] <- min(values[i]*range[i,1], values[i]*range[i,2]) } Qgx <- delta*Qgx Qgx <- temp + Qgx } } return(Qgx) } #' Check if CTMC is irreducible #' #' @description #' This function verifies whether a CTMC object is irreducible #' #' @usage is.CTMCirreducible(ctmc) #' #' @param ctmc a ctmc-class object #' #' @references #' Continuous-Time Markov Chains, Karl Sigman, Columbia University #' #' @author Vandit Jain #' #' @return a boolean value as described above. #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' is.CTMCirreducible(molecularCTMC) #' #' @export is.CTMCirreducible <- function(ctmc) { if(!is(ctmc, 'ctmc') ) { stop("please provide a valid ctmc class object") } ## gets the embeded chain matrix embeddedChainMatrix <- generatorToTransitionMatrix(ctmc@generator) ## forms a markovchain object related to embedded transition matrix markovchainObject <- new("markovchain",states = ctmc@states, transitionMatrix = embeddedChainMatrix) ## returns result using is.irreducible function on embedded chain transition matrix return(is.irreducible(markovchainObject)) } #' checks if ctmc object is time reversible #' #' @description #' The function returns checks if provided function is time reversible #' #' @usage 
is.TimeReversible(ctmc) #' #' @param ctmc a ctmc-class object #' #' @return Returns a boolean value stating whether ctmc object is time reversible #' #' @author Vandit Jain #' #' @return a boolean value as described above #' #' @references #' INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' is.TimeReversible(molecularCTMC) #' #' @export is.TimeReversible <- function(ctmc) { if(!is(ctmc,"ctmc")) { stop("please provide a valid ctmc-class object") } ## get steady state probabilities Pi <- steadyStates(ctmc) ## initialise boolean result check <- TRUE ## no of states m <- length(ctmc@states) ## checks for byrow if(ctmc@byrow == FALSE) gen <- t(ctmc@generator) else gen <- ctmc@generator ## iterates for every state for( i in 1:m) { for(j in 1:m) { if(Pi[i]*gen[i,j] != Pi[j]*gen[j,i]) { check <- FALSE break } } } return(check) } # `generator/nextki` <- function(k) { # if(k >= 0) return(-1-k) # return(-k) # } # # `generator/nextk` <- function(k, Kmin, Kmax) { # if(is.null(k)) { # k <- rep(0, length(Kmin)) # return(list(ans = TRUE, k = k)) # } # # if(length(Kmin) == 0) { # return(list(ans = FALSE, k = k)) # } # # i <- 1 # kl <- k # kl[i] <- `generator/nextki`(kl[i]) # while (kl[i] > Kmax[i] || kl[i] < Kmin[i]) { # kl[i] <- 0 # i <- i+1 # if(i > length(kl)) { # k <- kl # return(list(ans = FALSE, k = k)) # } # kl[i] <- `generator/nextki`(kl[i]) # } # k <- kl # return(list(ans = TRUE, k = k)) # } # # `generator/generator` <- function(Po, Di, odi) { # P <- Po # N <- nrow(P) # # if(Di > 22) return(NULL) # bad idea # options(digits = Di) # odigs <- odi # # rSum <- rowSums(P) # if(! 
all(abs(1-rSum) < 0.001)) { # stop("Sum of each rows of Po should be equal to 1") # } # # P <- P/rSum # d <- det(P) # # if(d <= 0) { # cat("Matrix has non-positive determinant") # return(NULL) # } # # diagP <- 1 # for(i in 1:nrow(P)) diagP <- diagP * P[i, i] # # if(d >= diagP) { # cat("Determinant exceeds product of diagonal elements\n") # return(NULL) # } # # E <- eigen(P)[[1]] # B <- eigen(P)[[2]] # # print("Eigenvalues") # print(E) # # # risky # if(length(unique(E)) != length(E)) { # warning("Matrix does not have distinct eigenvalues") # } # # L <- abs(log(d)) # addigs <- 2 + round(log10(1/Matrix::rcond(B))) + round(L/log(10)) # problem # # if(options()$digits < odigs + addigs) { # if(odigs + addigs > 100) { # print("Eigenvector matrix is singular") # return(NULL) # } # # cat('Going to', odigs + addigs, "digits") # return(`generator/generator`(Po, odigs + addigs, odigs)) # } # # Bi <- solve(B) # # posevs <- NULL # negevs <- NULL # bestj <- NULL # bestQ <- NULL # marks <- rep(TRUE, length(E)) # # for(i in 1:length(E)) { # if(marks[i] && !(Re(E[i]) > 0 && Im(E[i]) == 0)) { # invalid comparison of complex number # cj <- Conj(E[i]) # best <- Inf # if(i+1 <= length(E)) { # for(j in (i+1):length(E)) { # if(marks[j]) { # score <- abs(cj-E[j]) # if(score < best) { # best <- score # bestj <- j # } # } # } # } # # if(best > 10^(3-options()$digits)) { # cat("Unpaired non-positive eigenvalue", E[i]) # return(NULL) # } # marks[bestj] <- FALSE # if(Im(E[i]) >= 0) { # posevs <- c(posevs, i) # negevs <- c(negevs, bestj) # if(Im(E[bestj]) == 0) { # E[bestj] <- complex(real = E[bestj], imaginary = 0) # } # } else { # posevs <- c(posevs, bestj) # negevs <- c(negevs, i) # if(Im(E[i]) == 0) { # E[i] <- complex(real = E[i], imaginary = 0) # } # } # } # } # # npairs <- length(posevs) # # display conjugate pairs # # Kmax <- rep(0, npairs) # Kmin <- Kmax # # for(i in 1:npairs) { # a <- Arg(E[posevs[i]]) # Kmax[i] <- trunc((L-a)/2*pi) # Kmin[i] <- trunc((-L-a)/2*pi) # } # # # display 
K-max # # display K-min # # best <- -0.001 # DD <- diag(log(E)) # DK <- matlab::zeros(N) # res <- list(); p <- 1 # k <- NULL # while(TRUE) { # # dlist <- `generator/nextk`(k, Kmin, Kmax) # k <- dlist$k # # if(dlist$ans == FALSE) {break} # # # display value of k # for(i in 1:npairs) { # ke <- complex(real = 0, imaginary = 2*pi*k[i]) # DK[posevs[i], posevs[i]] <- ke # DK[negevs[i], negevs[i]] <- -ke # } # # Q <- B %*% (DD + DK) %*% Bi # # Q <- fnormal(Re(Q), options()$digits, 5*(10^(-1-odigs))) # define fnormal of maple # qmin <- Q[1,2] # for(i in 1:N) { # for(j in 1:N) { # if(i != j) { # if(Q[i, j] < qmin) qmin <- Q[i, j] # } # } # } # # if(EnvAllGenerators == TRUE) { # if(qmin > -.001) { # cat("Possible generator with qmin =", qmin) # res[[p]] <- round(Q, odigs) # p <- p + 1 # } else { # cat("qmin =", qmin) # } # # } else { # if(qmin >= 0) { # cat("Found a generator") # return(round(Q, odigs)) # } else { # if(qmin > best) { # best <- qmin # bestQ <- Q # } # if(qmin > -.001) { # cat("Approximate generator with qmin = ", qmin) # } else { # cat("qmin =", qmin) # } # } # } # } # # if(EnvAllGenerators == TRUE) { # return(res) # } # # warning("No completely valid generator found") # # if(! is.null(bestQ)) { # return(round(bestQ, odigs)) # } else return(NULL) # # } # # generator <- function(Po, digits = 10) { # odigs <- digits # options(digits = 15) # if(is.matrix(Po)) { # P <- Po # } else { # stop("Po must be matrix") # } # # if(nrow(P) != ncol(P)) { # print(P) # stop('Po must be square matrix') # } # # if(! 
all(P >= 0)) { # print(P) # stop('Po must be non negative square matrix') # } # # `generator/generator`(P, options()$digits, odigs) # } markovchain/R/RcppExports.R0000644000176200001440000005016315137710161015400 0ustar liggesusers# Generated by using Rcpp::compileAttributes() -> do not edit by hand # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 .isGenRcpp <- function(gen) { .Call(`_markovchain_isGen`, gen) } #' @name generatorToTransitionMatrix #' @title Function to obtain the transition matrix from the generator #' @description The transition matrix of the embedded DTMC is inferred from the CTMC's generator #' #' @usage generatorToTransitionMatrix(gen, byrow = TRUE) #' #' @param gen The generator matrix #' @param byrow Flag to determine if rows (columns) sum to 0 #' @return Returns the transition matrix. #' #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. #' Anderson, University of Wisconsin at Madison #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{rctmc}},\code{\link{ctmc-class}} #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' generatorToTransitionMatrix(gen) #' #' @export generatorToTransitionMatrix <- function(gen, byrow = TRUE) { .Call(`_markovchain_generatorToTransitionMatrix`, gen, byrow) } #' @name ctmcFit #' @title Function to fit a CTMC #' @description This function fits the underlying CTMC give the state #' transition data and the transition times using the maximum likelihood #' method (MLE) #' @usage ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) #' @param data It is a list of two elements. The first element is a character #' vector denoting the states. The second is a numeric vector denoting the #' corresponding transition times. 
#' @param byrow Determines if the output transition probabilities of the #' underlying embedded DTMC are by row. #' @param name Optional name for the CTMC. #' @param confidencelevel Confidence level for the confidence interval #' construnction. #' @return It returns a list containing the CTMC object and the confidence intervals. #' #' @details Note that in data, there must exist an element wise corresponding #' between the two elements of the list and that data[[2]][1] is always 0. #' @references Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{rctmc}} #' #' @examples #' data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) #' ctmcFit(data) #' #' @export #' ctmcFit <- function(data, byrow = TRUE, name = "", confidencelevel = 0.95) { .Call(`_markovchain_ctmcFit`, data, byrow, name, confidencelevel) } .ExpectedTimeRCpp <- function(x, y) { .Call(`_markovchain_ExpectedTimeRcpp`, x, y) } .probabilityatTRCpp <- function(y) { .Call(`_markovchain_probabilityatTRCpp`, y) } .impreciseProbabilityatTRCpp <- function(C, i, t, s, error) { .Call(`_markovchain_impreciseProbabilityatTRCpp`, C, i, t, s, error) } #' @export seq2freqProb <- function(sequence) { .Call(`_markovchain_seq2freqProb`, sequence) } #' @export seq2matHigh <- function(sequence, order) { .Call(`_markovchain_seq2matHigh`, sequence, order) } .markovchainSequenceRcpp <- function(n, markovchain, t0, include_t0 = FALSE) { .Call(`_markovchain_markovchainSequenceRcpp`, n, markovchain, t0, include_t0) } .markovchainListRcpp <- function(n, object, include_t0 = FALSE, t0 = character()) { .Call(`_markovchain_markovchainListRcpp`, n, object, include_t0, t0) } .markovchainSequenceParallelRcpp <- function(listObject, n, include_t0 = FALSE, init_state = character()) { .Call(`_markovchain_markovchainSequenceParallelRcpp`, listObject, n, include_t0, init_state) } #' @rdname 
markovchainFit #' #' @export createSequenceMatrix <- function(stringchar, toRowProbs = FALSE, sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain_createSequenceMatrix`, stringchar, toRowProbs, sanitize, possibleStates) } .mcListFitForList <- function(data) { .Call(`_markovchain_mcListFitForList`, data) } .matr2Mc <- function(matrData, laplacian = 0, sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain__matr2Mc`, matrData, laplacian, sanitize, possibleStates) } .list2Mc <- function(data, laplacian = 0, sanitize = FALSE) { .Call(`_markovchain__list2Mc`, data, laplacian, sanitize) } #' @name inferHyperparam #' @title Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set #' @description Since the Bayesian inference approach implemented in the package is based on conjugate priors, #' hyperparameters must be provided to model the prior probability distribution of the chain #' parameters. The hyperparameters are inferred from a given a priori matrix under the assumption #' that the matrix provided corresponds to the mean (expected) values of the chain parameters. A #' scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred #' from a data set. #' #' @param transMatr A valid transition matrix, with dimension names. #' @param scale A vector of scaling factors, each element corresponds to the row names of the provided transition #' matrix transMatr, in the same order. #' @param data A data set from which the hyperparameters are inferred. #' #' @details transMatr and scale need not be provided if data is provided. #' @return Returns the hyperparameter matrix in a list. #' #' @note The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, #' and the elements in the matrix are correspondingly permuted. #' #' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R #' package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi, Giorgio Spedicato #' @seealso \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} #' #' @examples #' data(rain, package = "markovchain") #' inferHyperparam(data = rain$rain) #' #' weatherStates <- c("sunny", "cloudy", "rain") #' weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, #' 0.3, 0.4, 0.3, #' 0.2, 0.4, 0.4), #' byrow = TRUE, nrow = 3, #' dimnames = list(weatherStates, weatherStates)) #' inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) #' #' @export #' inferHyperparam <- function(transMatr = matrix(), scale = numeric(), data = character()) { .Call(`_markovchain_inferHyperparam`, transMatr, scale, data) } #' @name markovchainFit #' @title Function to fit a discrete Markov chain #' @description Given a sequence of states arising from a stationary state, #' it fits the underlying Markov chain distribution using either MLE (also using a #' Laplacian smoother), bootstrap or by MAP (Bayesian) inference. #' #' @param data It can be a character vector or a \deqn{n x n} matrix or a \deqn{n x n} data frame or a list #' @param method Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace" #' @param byrow it tells whether the output Markov chain should show the transition probabilities by row. #' @param nboot Number of bootstrap replicates in case "bootstrap" is used. #' @param laplacian Laplacian smoothing parameter, default zero. It is only used when "laplace" method #' is chosen. #' @param name Optional character for name slot. #' @param parallel Use parallel processing when performing Boostrap estimates. #' @param confidencelevel \deqn{\alpha} level for conficence intervals width. #' Used only when \code{method} equal to "mle". #' @param confint a boolean to decide whether to compute Confidence Interval or not. #' @param hyperparam Hyperparameter matrix for the a priori distribution. 
If none is provided, #' default value of 1 is assigned to each parameter. This must be of size #' \deqn{k x k} where k is the number of states in the chain and the values #' should typically be non-negative integers. #' @param stringchar It can be a \deqn{n x n} matrix or a character vector or a list #' @param toRowProbs converts a sequence matrix into a probability matrix #' @param sanitize put 1 in all rows having rowSum equal to zero #' @param possibleStates Possible states which are not present in the given sequence #' #' @details Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} #' contain \code{NAs}, the related \code{NA} containing transitions will be ignored. #' #' @return A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix #' of standards deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method #' is used, the lower and upper confidence bounds are returned along with the standard error. #' The "map" method also returns the expected value of the parameters with respect to the #' posterior distribution. #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, #' and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, #' Alfred Hubler, Santa Fe Institute #' #' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R #' package version 0.2.5 #' #' @author Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi #' @note This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". #' In addition, parallel facility is not complete, involving only a part of the bootstrap process. #' When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is #' currently available. 
#' #' @seealso \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} #' @examples #' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", #' "b", "b", "b", "a") #' sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) #' mcFitMLE <- markovchainFit(data = sequence) #' mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") #' #' na.sequence <- c("a", NA, "a", "b") #' # There will be only a (a,b) transition #' na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) #' mcFitMLE <- markovchainFit(data = na.sequence) #' #' # data can be a list of character vectors #' sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) #' mcFitMap <- markovchainFit(sequences, method = "map") #' mcFitMle <- markovchainFit(sequences, method = "mle") #' @rdname markovchainFit #' #' @export #' markovchainFit <- function(data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, confint = TRUE, hyperparam = matrix(), sanitize = FALSE, possibleStates = character()) { .Call(`_markovchain_markovchainFit`, data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, confint, hyperparam, sanitize, possibleStates) } .noofVisitsDistRCpp <- function(matrix, i, N) { .Call(`_markovchain_noofVisitsDistRCpp`, matrix, i, N) } .multinomialCIForRowRcpp <- function(x, confidencelevel) { .Call(`_markovchain_multinomialCIForRow`, x, confidencelevel) } .multinomialCIRcpp <- function(transMat, seqMat, confidencelevel) { .Call(`_markovchain_multinomCI`, transMat, seqMat, confidencelevel) } .commClassesKernelRcpp <- function(P) { .Call(`_markovchain_commClassesKernel`, P) } .communicatingClassesRcpp <- function(object) { .Call(`_markovchain_communicatingClasses`, object) } .transientStatesRcpp <- function(object) { .Call(`_markovchain_transientStates`, object) } .recurrentStatesRcpp <- function(object) { 
.Call(`_markovchain_recurrentStates`, object) } .recurrentClassesRcpp <- function(object) { .Call(`_markovchain_recurrentClasses`, object) } .transientClassesRcpp <- function(object) { .Call(`_markovchain_transientClasses`, object) } .reachabilityMatrixRcpp <- function(obj) { .Call(`_markovchain_reachabilityMatrix`, obj) } .isAccessibleRcpp <- function(obj, from, to) { .Call(`_markovchain_isAccessible`, obj, from, to) } .summaryKernelRcpp <- function(object) { .Call(`_markovchain_summaryKernel`, object) } .firstpassageKernelRcpp <- function(P, i, n) { .Call(`_markovchain_firstpassageKernel`, P, i, n) } .firstPassageMultipleRCpp <- function(P, i, setno, n) { .Call(`_markovchain_firstPassageMultipleRCpp`, P, i, setno, n) } .expectedRewardsRCpp <- function(matrix, n, rewards) { .Call(`_markovchain_expectedRewardsRCpp`, matrix, n, rewards) } .expectedRewardsBeforeHittingARCpp <- function(matrix, s0, rewards, n) { .Call(`_markovchain_expectedRewardsBeforeHittingARCpp`, matrix, s0, rewards, n) } .gcdRcpp <- function(a, b) { .Call(`_markovchain_gcd`, a, b) } #' @rdname structuralAnalysis #' #' @export period <- function(object) { .Call(`_markovchain_period`, object) } #' @title predictiveDistribution #' #' @description The function computes the probability of observing a new data #' set, given a data set #' @usage predictiveDistribution(stringchar, newData, hyperparam = matrix()) #' #' @param stringchar This is the data using which the Bayesian inference is #' performed. #' @param newData This is the data whose predictive probability is computed. #' @param hyperparam This determines the shape of the prior distribution of the #' parameters. If none is provided, default value of 1 is assigned to each #' parameter. This must be of size kxk where k is the number of states in the #' chain and the values should typically be non-negative integers. #' @return The log of the probability is returned. #' #' @details The underlying method is Bayesian inference. 
The probability is #' computed by averaging the likelihood of the new data with respect to the #' posterior. Since the method assumes conjugate priors, the result can be #' represented in a closed form (see the vignette for more details), which is #' what is returned. #' @references #' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, #' and Out-of-Class Modeling, Christopher C. Strelioff, James P. #' Crutchfield, Alfred Hubler, Santa Fe Institute #' #' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov #' Chains. R package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi #' @seealso \code{\link{markovchainFit}} #' @examples #' sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", #' "b", "b", "b", "a") #' hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) #' predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) #' hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] #' predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) #' predProb2==predProb #' @export #' predictiveDistribution <- function(stringchar, newData, hyperparam = matrix()) { .Call(`_markovchain_predictiveDistribution`, stringchar, newData, hyperparam) } #' @title priorDistribution #' #' @description Function to evaluate the prior probability of a transition #' matrix. It is based on conjugate priors and therefore a Dirichlet #' distribution is used to model the transitions of each state. #' @usage priorDistribution(transMatr, hyperparam = matrix()) #' #' @param transMatr The transition matrix whose probability is the parameter of #' interest. #' @param hyperparam The hyperparam matrix (optional). If not provided, a #' default value of 1 is assumed for each and therefore the resulting #' probability distribution is uniform. #' @return The log of the probabilities for each state is returned in a numeric #' vector. 
Each number in the vector represents the probability (log) of #' having a probability transition vector as specified in corresponding the #' row of the transition matrix. #' #' @details The states (dimnames) of the transition matrix and the hyperparam #' may be in any order. #' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First #' Order Markov Chains. R package version 0.2.5 #' #' @author Sai Bhargav Yalamanchi, Giorgio Spedicato #' #' @note This function can be used in conjunction with inferHyperparam. For #' example, if the user has a prior data set and a prior transition matrix, #' he can infer the hyperparameters using inferHyperparam and then compute #' the probability of their prior matrix using the inferred hyperparameters #' with priorDistribution. #' @seealso \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} #' #' @examples #' priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), #' nrow = 2, #' dimnames = list(c("a", "b"), c("a", "b"))), #' matrix(c(2, 2, 2, 2), #' nrow = 2, #' dimnames = list(c("a", "b"), c("a", "b")))) #' @export priorDistribution <- function(transMatr, hyperparam = matrix()) { .Call(`_markovchain_priorDistribution`, transMatr, hyperparam) } .hittingProbabilitiesRcpp <- function(object) { .Call(`_markovchain_hittingProbabilities`, object) } .canonicFormRcpp <- function(obj) { .Call(`_markovchain_canonicForm`, obj) } .steadyStatesRcpp <- function(obj) { .Call(`_markovchain_steadyStates`, obj) } .absorbingStatesRcpp <- function(obj) { .Call(`_markovchain_absorbingStates`, obj) } .isIrreducibleRcpp <- function(obj) { .Call(`_markovchain_isIrreducible`, obj) } .isRegularRcpp <- function(obj) { .Call(`_markovchain_isRegular`, obj) } .meanAbsorptionTimeRcpp <- function(obj) { .Call(`_markovchain_meanAbsorptionTime`, obj) } .absorptionProbabilitiesRcpp <- function(obj) { .Call(`_markovchain_absorptionProbabilities`, obj) } .meanFirstPassageTimeRcpp <- function(obj, destination) { 
.Call(`_markovchain_meanFirstPassageTime`, obj, destination) } .meanRecurrenceTimeRcpp <- function(obj) { .Call(`_markovchain_meanRecurrenceTime`, obj) } .minNumVisitsRcpp <- function(obj) { .Call(`_markovchain_meanNumVisits`, obj) } .isProbability <- function(prob) { .Call(`_markovchain_isProb`, prob) } .isStochasticMatrix <- function(m, byrow) { .Call(`_markovchain_isStochasticMatrix`, m, byrow) } .isProbabilityVector <- function(prob) { .Call(`_markovchain_isProbVector`, prob) } .testthatIsAccesibleRcpp <- function(obj) { .Call(`_markovchain_checkIsAccesibleMethod`, obj) } .approxEqualMatricesRcpp <- function(a, b) { .Call(`_markovchain_approxEqual`, a, b) } .testthatIsPartitionRcpp <- function(commClasses, states) { .Call(`_markovchain_isPartition`, commClasses, states) } .testthatAreHittingRcpp <- function(probs, hitting, byrow) { .Call(`_markovchain_areHittingProbabilities`, probs, hitting, byrow) } .testthatAreMeanNumVisitsRcpp <- function(probs, numVisits, hitting, byrow) { .Call(`_markovchain_areMeanNumVisits`, probs, numVisits, hitting, byrow) } .testthatRecurrentHittingRcpp <- function(recurrentClasses, hitting, states, byrow) { .Call(`_markovchain_recurrentHitting`, recurrentClasses, hitting, states, byrow) } .testthatHittingAreOneRcpp <- function(matrix) { .Call(`_markovchain_hittingProbsAreOne`, matrix) } .testthatAbsorbingAreRecurrentClassRcpp <- function(absorbingStates, recurrentClasses) { .Call(`_markovchain_absorbingAreRecurrentClass`, absorbingStates, recurrentClasses) } markovchain/R/fittingFunctions.R0000644000176200001440000006227515137702633016460 0ustar liggesusers#' Function to generate a sequence of states from homogeneous Markov chains. #' #' Provided any \code{markovchain} object, it returns a sequence of #' states coming from the underlying stationary distribution. 
#'
#' @param n Sample size
#' @param markovchain \code{markovchain} object
#' @param t0 The initial state
#' @param include.t0 Specify if the initial state shall be used
#' @param useRCpp Boolean. Should RCpp fast implementation being used? Default is yes.
#'
#' @details A sequence of size n is sampled.
#'
#' @return A Character Vector
#'
#' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010
#'
#' @author Giorgio Spedicato
#'
#' @seealso \code{\link{markovchainFit}}
#'
#' @examples
#' # define the markovchain object
#' statesNames <- c("a", "b", "c")
#' mcB <- new("markovchain", states = statesNames,
#'            transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1),
#'                                      nrow = 3, byrow = TRUE,
#'                                      dimnames = list(statesNames, statesNames)))
#'
#' # show the sequence
#' outs <- markovchainSequence(n = 100, markovchain = mcB, t0 = "a")
#'
#' @export
markovchainSequence <- function(n, markovchain,
                                t0 = sample(markovchain@states, 1),
                                include.t0 = FALSE, useRCpp = TRUE) {
  # --- validate the requested sequence length -------------------------------
  if (!is.numeric(n) || length(n) != 1 || is.na(n) || !is.finite(n)) {
    stop("`n` must be a finite numeric scalar")
  }
  if (!isTRUE(all.equal(n, round(n)))) {
    stop("`n` must be an integer value")
  }
  n <- as.integer(n)
  if (n < 0) {
    stop("`n` must be greater than or equal to 0")
  }

  # the initial state must belong to the chain's state space
  if (!(t0 %in% markovchain@states)) {
    stop("Error! Initial state not defined")
  }

  # delegate to the compiled implementation when requested
  if (useRCpp) {
    return(.markovchainSequenceRcpp(n, markovchain, t0, include.t0))
  }

  # --- pure-R fallback: walk the chain one transition at a time -------------
  stateSpace <- markovchain@states
  chain <- character(n)      # preallocated result vector
  current <- t0              # state we are transitioning from

  for (step in seq_len(n)) {
    # transition probabilities out of the current state (row-wise matrix)
    transProbs <- markovchain@transitionMatrix[current, ]
    current <- sample(size = 1, x = stateSpace, prob = transProbs)
    chain[step] <- current
  }

  # optionally prepend the initial state to the sampled path
  if (include.t0) {
    return(c(t0, chain))
  }
  chain
}

##################
# random sampler #
##################

# Validate a non-homogeneous markovchain list: every state that is reachable
# in one step of chain k-1 must exist in chain k. `object` is a plain list of
# markovchain objects; returns TRUE when the sequence of chains is consistent.
.checkSequence <- function(object) {
  # a single transition matrix is trivially consistent
  if (length(object) == 1) {
    return(TRUE)
  }

  for (k in 2:length(object)) {
    prev <- object[[k - 1]]

    # states reachable in one step of the previous chain: any state whose
    # incoming-probability column (or row, for column-stochastic) is non-zero
    if (prev@byrow) {
      reachable <- colSums(prev@transitionMatrix) != 0
    } else {
      reachable <- rowSums(prev@transitionMatrix) != 0
    }

    fromStates <- states(prev)[reachable]
    toStates <- states(object[[k]])

    # fromStates must be a subset of toStates; otherwise the list is invalid
    if (!setequal(intersect(fromStates, toStates), fromStates)) {
      return(FALSE)
    }
  }
  TRUE
}

#' Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.
#' #' Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of #' states coming from the underlying stationary distribution. #' #' @param n Sample size #' @param object Either a \code{markovchain} or a \code{markovchainList} object #' @param what It specifies whether either a \code{data.frame} or a \code{matrix} #' (each rows represent a simulation) or a \code{list} is returned. #' @param useRCpp Boolean. Should RCpp fast implementation being used? Default is yes. #' @param parallel Boolean. Should parallel implementation being used? Default is yes. #' @param num.cores Number of Cores to be used #' @param ... additional parameters passed to the internal sampler #' #' @details When a homogeneous process is assumed (\code{markovchain} object) a sequence is #' sampled of size n. When a non - homogeneous process is assumed, #' n samples are taken but the process is assumed to last from the begin to the end of the #' non-homogeneous markov process. #' #' @return Character Vector, data.frame, list or matrix #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @note Check the type of input #' #' @seealso \code{\link{markovchainFit}}, \code{\link{markovchainSequence}} #' #' @examples #' # define the markovchain object #' statesNames <- c("a", "b", "c") #' mcB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), #' nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' # show the sequence #' outs <- rmarkovchain(n = 100, object = mcB, what = "list") #' #' #' #define markovchainList object #' statesNames <- c("a", "b", "c") #' mcA <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mcB <- new("markovchain", states = statesNames, 
transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mcC <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) #' #' # show the list of sequence #' rmarkovchain(100, mclist, "list") #' #' @export rmarkovchain <- function(n, object, what = "data.frame", useRCpp = TRUE, parallel = FALSE, num.cores = NULL, ...) { # check the class of the object if (is(object,"markovchain")) { out <- markovchainSequence(n = n, markovchain = object, useRCpp = useRCpp, ...) return(out) } if (is(object,"markovchainList")) { ####################################################### if(useRCpp && !parallel) { # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() # call fast cpp function dataList <- .markovchainListRcpp(n, object@markovchains, include.t0, t0) # format in which results to be returned if (what == "data.frame") { out <- data.frame(iteration = dataList[[1]], values = dataList[[2]]) } else { # output in matrix format # each row is an independent sequence out <- matrix(data = dataList[[2]], nrow = n, byrow = TRUE) # output in list format if (what == "list") { # outlist <- list() # for (i in 1:nrow(out)) # outlist[[i]] <- out[i, ] # out <- outlist out <- as.list(data.frame(t(out), stringsAsFactors = FALSE)) out <- unname(out) } } return(out) } ########################################################## if(useRCpp && parallel) { # Calculate the number of cores # It's not good to use all cores no_cores <- max(1,parallel::detectCores() - 1) # number of cores specified 
should be less than or equal to maximum cores available if((! is.null(num.cores)) && num.cores <= no_cores + 1 && num.cores >= 1) { no_cores <- num.cores } RcppParallel::setThreadOptions(no_cores) # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() dataList <- .markovchainSequenceParallelRcpp(object, n, include.t0, t0) if(what == "list") return(dataList) # dimension of matrix to be returned nrow <- length(dataList) ncol <- length(dataList[[1]]) if(what == "matrix") { out <- matrix(unlist(dataList), nrow = nrow, ncol = ncol, byrow = TRUE) # for(i in 1:nrow) out[i, ] <- dataList[[i]] return(out) } iteration <- unlist(lapply(1:nrow, rep, times = ncol)) values <- unlist(dataList) # if what id data frame # for(i in 1:nrow) { # iteration <- c(iteration, rep(i, ncol)) # values <- append(values, dataList[[i]]) # } return(data.frame(iteration = iteration, values = values)) } ########################################################## if(!useRCpp && parallel) { # if include.t0 is not passed as extra argument then set include.t0 as false include.t0 <- list(...)$include.t0 include.t0 <- ifelse(is.null(include.t0), FALSE, include.t0) # check whether initial state is passed or not t0 <- list(...)$t0 if (is.null(t0)) t0 <- character() dataList <- .markovchainSequenceParallel(n, object, t0, num.cores, include.t0) if(what == "list") return(dataList) # dimension of matrix to be returned nrow <- length(dataList) ncol <- length(dataList[[1]]) if(what == "matrix") { out <- matrix(nrow = nrow, ncol = ncol) for(i in 1:nrow) out[i, ] <- dataList[[i]] return(out) } iteration <- numeric() values <- character() # if what id data frame for(i in 1:nrow) { iteration <- append(iteration, rep(i, ncol)) values <- append(values, dataList[[i]]) } return(data.frame(iteration = 
iteration, values = values)) } ########################################################## # store list of markovchain object in object object <- object@markovchains # check the validity of markovchainList object verify <- .checkSequence(object = object) # show warning if sequence is invalid if (!verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!") } # helper vector iteration <- numeric() values <- character() # create one sequence in each iteration for (i in 1:n) { # the first iteration may include initial state sampledValues <- markovchainSequence(n = 1, markovchain = object[[1]], ...) outIter <- rep(i, length(sampledValues)) # number of markovchain objects are more than one if (length(object) > 1) { for (j in 2:length(object)) { pos2take <- length(sampledValues) # select new state of the sequence from the old state # t0 refers to the old state newVals <-markovchainSequence(n = 1, markovchain = object[[j]], t0 = sampledValues[pos2take]) # update in every iteration outIter <- c(outIter, i) sampledValues <- c(sampledValues, newVals) } } # populate the helper vectors iteration <- c(iteration, outIter) values <- c(values, sampledValues) } # defining the output if (what == "data.frame") { out <- data.frame(iteration = iteration, values = values) } else { # ouput in matrix format out <- matrix(data = values, nrow = n, byrow = TRUE) # store each row of the matrix in the list if (what == 'list') { outlist <- list() for (i in 1:nrow(out)) outlist[[i]] <- out[i, ] out <- outlist } } } return(out) } ###################################################################### # helper function to calculate one sequence .markovchainSPHelper <- function(x, t0, mclist, include.t0) { # number of transition matrices n <- length(mclist@markovchains) # take care of initial state vin <- 0 if(include.t0) vin <- 1 # a character vector to store a single sequence seq <- character(length = n + vin) if(length(t0) == 0) { stateNames <- 
mclist@markovchains[[1]]@states t0 <- sample(x = stateNames, size = 1, prob = rep(1 / length(stateNames), length(stateNames))) } if(include.t0) seq[1] <- t0 invisible(lapply(seq_len(n), function(i) { stateNames <<- mclist@markovchains[[i]]@states byRow <- mclist@markovchains[[i]]@byrow # check whether transition matrix follows row-wise or column-wise fashion if(byRow) prob <- mclist@markovchains[[i]]@transitionMatrix[which(stateNames == t0), ] else prob <- mclist@markovchains[[i]]@transitionMatrix[, which(stateNames == t0)] # initial state for the next transition matrix t0 <<- sample(x = stateNames, size = 1, prob = prob) # populate the sequence vector seq[i+vin] <<- t0 } )) return(seq) } # Function to generate a list of sequence of states in parallel from non-homogeneous Markov chains. # # Provided any markovchainList object, it returns a list of sequence of states coming # from the underlying stationary distribution. # # @param n Sample size # @param object markovchainList object # @param t0 Initial state # @param num.cores Number of cores # .markovchainSequenceParallel <- function(n, object, t0 = character(), num.cores = NULL, include.t0 = FALSE) { # check for the validity of non-uniform markov chain verify <- .checkSequence(object@markovchains) if (!verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!") } # Calculate the number of cores # It's not good to use all cores no_cores <- max(1,parallel::detectCores() - 1) # number of cores specified should be less than or equal to maximum cores available if((! 
is.null(num.cores)) && num.cores <= no_cores + 1 && num.cores >= 1) { no_cores <- num.cores } # Initiate cluster cl <- parallel::makeCluster(no_cores) # export the variables to be used in the helper function # parallel::clusterExport(cl, "t0") # export the variables to be used in the helper function mclist <- object # parallel::clusterExport(cl, "mclist") # list of n sequence listSeq <- tryCatch(parallel::parLapply(cl, 1:n, .markovchainSPHelper, t0, mclist, include.t0), error=function(e) e, warning=function(w) w) # release the resources parallel::stopCluster(cl) return(listSeq) } ###################################################################### # function to fit a DTMC with Laplacian Smoother .mcFitLaplacianSmooth <- function(stringchar, byrow, laplacian = 0.01) { # every element of the matrix store the number of times jth state appears just # after the ith state origNum <- createSequenceMatrix(stringchar = stringchar, toRowProbs = FALSE) # add laplacian to the sequence matrix # why? 
to avoid the cases where sum of row is zero newNum <- origNum + laplacian # store sum of each row in the vector newSumOfRow <- rowSums(newNum) # helper matrix to convert frequency matrix to transition matrix newDen <- matrix(rep(newSumOfRow, length(newSumOfRow)), byrow = FALSE, ncol = length(newSumOfRow)) # transition matrix transMatr <- newNum / newDen # create a markovchain object outMc <- new("markovchain", transitionMatrix = transMatr, name = "Laplacian Smooth Fit") # transpose the transition matrix if (!byrow) { outMc@transitionMatrix <- t(outMc@transitionMatrix) outMc@byrow <- FALSE } # wrap markovchain object in a list out <- list(estimate = outMc) return(out) } # function that return a Markov Chain from a given matrix of observations # .matr2Mc <- function(matrData, laplacian = 0) { # # # number of columns in the input matrix # nCols <- ncol(matrData) # # # an empty character vector to store names of possible states # uniqueVals <- character() # # # populate uniqueVals with names of states # for(i in 1:nCols) { # uniqueVals <- union(uniqueVals, unique(as.character(matrData[,i]))) # } # # # possible states in lexicographical order # uniqueVals <- sort(uniqueVals) # # # create a contingency matrix which store the number of times # # jth state appear just after the ith state # contingencyMatrix <- matrix(rep(0, length(uniqueVals)^2), ncol = length(uniqueVals)) # # # set the names of rows and columns # rownames(contingencyMatrix) <- colnames(contingencyMatrix) <- uniqueVals # # # fill the contingency matrix # for (i in 1:nrow(matrData)) { # for (j in 2:nCols) { # # state in the ith row and (j-1)th column # stateBegin <- as.character(matrData[i, j-1]) # # # index of beginning state # whichRow <- which(uniqueVals == stateBegin) # # # state in the ith row and jth column # stateEnd <- as.character(matrData[i, j]) # # # index of ending state # whichCols <- which(uniqueVals == stateEnd) # # # update the contingency matrix # contingencyMatrix[whichRow, whichCols] <- 
contingencyMatrix[whichRow, whichCols] + 1 # } # } # # # add laplacian correction if needed # contingencyMatrix <- contingencyMatrix + laplacian # # # take care of rows with all entries 0 # sumOfRows <- rowSums(contingencyMatrix) # for(i in 1:length(sumOfRows)) { # if(sumOfRows[i] == 0) { # contingencyMatrix[i, ] <- 1 # sumOfRows[i] <- length(sumOfRows) # } # } # # # get a transition matrix and a DTMC # transitionMatrix <- contingencyMatrix / sumOfRows # # # markov chain object to be returned # outMc <- new("markovchain", transitionMatrix = transitionMatrix) # # return(outMc) # } #' @title markovchainListFit #' #' @description Given a data frame or a matrix (rows are observations, by cols #' the temporal sequence), it fits a non - homogeneous discrete time markov chain #' process (storing row). In particular a markovchainList of size = ncol - 1 is obtained #' estimating transitions from the n samples given by consecutive column pairs. #' #' @param data Either a matrix or a data.frame or a list object. #' @param laplacian Laplacian correction (default 0). #' @param byrow Indicates whether distinc stochastic processes trajectiories are shown in distinct rows. #' @param name Optional name. #' #' @details If \code{data} contains \code{NAs} then the transitions containing \code{NA} will be ignored. 
#' @return A list containing two slots: #' estimate (the estimate) #' name #' #' @examples #' #' # using holson dataset #' data(holson) #' # fitting a single markovchain #' singleMc <- markovchainFit(data = holson[,2:12]) #' # fitting a markovchainList #' mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList") #' @export markovchainListFit <- function(data, byrow = TRUE, laplacian = 0, name) { # check the format of input data if (!any(is.list(data),is.data.frame(data),is.matrix(data))) { stop("Error: data must be either a matrix or a data.frame or a list") } freqMatrixes <- list() # a pure list= a list and not a data frame if((is.list(data) == TRUE) & (is.data.frame(data)==FALSE)) { markovchains <- list() # list of frequency matrix freqMatrixes <- .mcListFitForList(data) } else{ # if input is data frame convert it to matrix if(is.data.frame(data)) { data <- unname(as.matrix(data)) } # make the entries row wise if it is not if(!byrow) { data <- t(data) } # number of columns in the matrix nCols <- ncol(data) # fit by columns freqMatrixes <- lapply(seq_len(nCols-1), function(i){ # (i-1)th transition matrix for transition from (i-1)th state to ith state matrData <- data[, c(i, i+1)] matrData[1, ] <- as.character(matrData[1, ]) # checking particular data for NA values. 
validTransition <- any(apply(matrData, 1, function(x){ !any(is.na(x)) })) if(validTransition) createSequenceMatrix(matrData, toRowProbs = FALSE, sanitize = TRUE) }) freqMatrixes <- freqMatrixes[ !sapply(freqMatrixes, is.null) ] } if(length(freqMatrixes) == 0) { return(list()) } markovchains <- lapply(freqMatrixes, function(freqMatrix){ # add laplacian correction freqMatrix <- freqMatrix + laplacian rSums <- rowSums(freqMatrix) # transition matrix tMatrix <- freqMatrix / rSums; estMc <- new("markovchain", transitionMatrix = tMatrix) estMc }) # create markovchainList object outMcList <- new("markovchainList", markovchains = markovchains) # wrap the object in a list out <- list(estimate = outMcList) # set the name of markovchainList object as given in the argument if(!missing(name)) { out$estimate@name <- name } return(out) } #' A function to compute multinomial confidence intervals of DTMC #' #' @description Return estimated transition matrix assuming a Multinomial Distribution #' #' @param transitionMatrix An estimated transition matrix. #' @param countsTransitionMatrix Empirical (conts) transition matrix, on which the \code{transitionMatrix} was performed. #' @param confidencelevel confidence interval level. #' #' @return Two matrices containing the confidence intervals. #' #' @seealso \code{markovchainFit} #' #' @references Constructing two-sided simultaneous confidence intervals #' for multinomial proportions for small counts in a large number of cells. 
#' Journal of Statistical Software 5(6) (2000) #' #' @examples #' seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") #' mcfit<-markovchainFit(data=seq,byrow=TRUE) #' seqmat<-createSequenceMatrix(seq) #' multinomialConfidenceIntervals(mcfit$estimate@transitionMatrix, seqmat, 0.95) #' @export multinomialConfidenceIntervals<-function(transitionMatrix, countsTransitionMatrix, confidencelevel=0.95) { out<-.multinomialCIRcpp(transMat=transitionMatrix, seqMat=countsTransitionMatrix,confidencelevel=confidencelevel) return(out) } #' return a joint pdf of the number of visits to the various states of the DTMC #' #' @description This function would return a joint pdf of the number of visits to #' the various states of the DTMC during the first N steps. #' #' @usage noofVisitsDist(markovchain,N,state) #' #' @param markovchain a markovchain-class object #' @param N no of steps #' @param state the initial state #' #' @details #' This function would return a joint pdf of the number of visits to #' the various states of the DTMC during the first N steps. #' #' @return a numeric vector depicting the above described probability density function. 
#'
#' @author Vandit Jain
#'
#' @examples
#' transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
#' simpleMc<-new("markovchain", states=c("a","b"),
#'              transitionMatrix=transMatr,
#'              name="simpleMc")
#' noofVisitsDist(simpleMc,5,"a")
#'
#' @export
noofVisitsDist <- function(markovchain, N = 5, state) {
  if (!is(markovchain, "markovchain"))
    stop("please provide a valid markovchain-class object")

  if (N <= 0)
    stop("please enter positive number of steps")

  # the transition matrix
  Tmatrix <- markovchain@transitionMatrix

  # character vector of states of the markovchain
  stateNames <- states(markovchain)

  # index of the initial state; `which()` returns integer(0) for an unknown
  # state, so we must test the length -- the old `i == -1` sentinel could
  # never match and made `if()` fail with a length-zero condition instead of
  # reporting the real problem
  i <- which(stateNames == state)
  if (length(i) == 0)
    stop("please provide a valid inital state")

  # call to Rcpp implementation of the function
  out <- .noofVisitsDistRCpp(Tmatrix, i, N)

  # add state names to the output vector
  names(out) <- stateNames
  c(out)
}
markovchain/R/matlab_package_functions.R0000644000176200001440000000047115137702633020114 0ustar liggesusers#' Matrix to create zeros
#'
#' @param n size of the matrix
#'
#' @return a square matrix of zeros
zeros <- function(n) {
  matrix(0, n, n)
}

#' Returns a matrix of ones
#'
#' Note: despite the Matlab-style name, this returns an all-ones matrix
#' (as Matlab's \code{ones} does), not an identity matrix.
#'
#' @param n size of the matrix
#'
#' @return an n x n matrix with every entry equal to 1
ones <- function(n) {
  matrix(1, n, n)
}
markovchain/R/fitHigherOrder.R0000644000176200001440000000542515137702633016022 0ustar liggesusers#' @title Higher order Markov Chains class
#' @name HigherOrderMarkovChain-class
#' @description The S4 class that describes \code{HigherOrderMarkovChain} objects.
#'
#' @export
setClass("HigherOrderMarkovChain",  # class name
         representation(
           states      = "character",
           order       = "numeric",
           transitions = "list",
           name        = "character"
         )
         # , prototype(states = c("a","b"), byrow = TRUE, # prototypizing
         #   transitionMatrix=matrix(data = c(0,1,1,0),
         #     nrow=2, byrow=TRUE, dimnames=list(c("a","b"), c("a","b"))),
         #   name="Unnamed Markov chain")
)

# Objective function passed to Rsolnp::solnp when fitting the lambda weights
# of a higher-order chain: squared norm of the residual
#   sum_i lambda_i * Q_i %*% X  -  X
# (Ching, Ng & Fung 2008). `QX` (list of Q_i %*% X) and `X` are looked up in
# the environment attached by fitHigherOrder().
.fn1 = function(params) {
  QX <- get("QX")
  X <- get("X")
  error <- 0
  for (i in 1:length(QX)) {
    # weight the residual of each term by lambda_i: under the equality
    # constraint sum(lambda) == 1 the accumulated total equals
    # sum_i lambda_i Q_i X - X. The previous code subtracted the full X on
    # every iteration, i.e. `order` times, which distorts the objective for
    # any order > 1.
    error <- error + params[i] * (QX[[i]] - X)
  }
  return(sum(error^2))
}

# Equality constraint passed to Rsolnp::solnp: the lambda weights must sum
# to eqB (set to 1 by fitHigherOrder), making .fn1 the Ching et al. objective.
.eqn1 = function(params) {
  return(sum(params))
}

#' @name fitHigherOrder
#' @aliases seq2freqProb seq2matHigh
#' @title Functions to fit a higher order Markov chain
#'
#' @description Given a sequence of states arising from a stationary state, it
#' fits the underlying Markov chain distribution with higher order.
#' @usage
#' fitHigherOrder(sequence, order = 2)
#' seq2freqProb(sequence)
#' seq2matHigh(sequence, order)
#'
#' @param sequence A character list.
#' @param order Markov chain order
#' @return A list containing lambda, Q, and X.
#'
#' @references
#' Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov
#' chains. In Markov Chains (pp. 141-176). Springer US.
#'
#' Ching, W. K., Ng, M. K., & Fung, E. S. (2008). Higher-order multivariate
#' Markov chains and their applications. Linear Algebra and its Applications,
#' 428(2), 492-507.
#'
#' @author Giorgio Spedicato, Tae Seung Kang
#'
#' @examples
#' sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b",
#'             "c", "a", "b", "c", "a", "b", "a", "b")
#' fitHigherOrder(sequence)
#'
#' @export
fitHigherOrder <- function(sequence, order = 2) {
  # Rsolnp is an optional dependency: only attempt the fit when available
  if (requireNamespace("Rsolnp", quietly = TRUE)) {
    # empirical probability of each state of the sequence
    X <- seq2freqProb(sequence)

    # Q[[o]]: o-step transition matrix estimate; QX[[o]] = Q_o %*% X
    Q <- list()
    QX <- list()
    for (o in 1:order) {
      Q[[o]] <- seq2matHigh(sequence, o)
      QX[[o]] <- Q[[o]] %*% X
    }

    # expose QX and X to the objective function through its enclosing
    # environment (.fn1 retrieves them with get())
    environment(.fn1) <- environment()

    # uniform starting weights, constrained to the simplex by .eqn1/LB
    params <- rep(1 / order, order)
    model <- Rsolnp::solnp(params, fun = .fn1, eqfun = .eqn1, eqB = 1,
                           LB = rep(0, order), control = list(trace = 0))
    lambda <- model$pars
    out <- list(lambda = lambda, Q = Q, X = X)
  } else {
    # signal a condition rather than print() so callers can catch or
    # suppress it; the NULL return contract is unchanged
    message("package Rsolnp unavailable")
    out <- NULL
  }
  return(out)
}
markovchain/R/markovchain.R0000644000176200001440000000360715137710345015416 0ustar liggesusers#' @title Easy Handling Discrete Time Markov Chains
#'
#' @description The package contains classes and method to create and manage
#' (plot, print, export for example) discrete time Markov chains (DTMC).
In #' addition it provide functions to perform statistical (fitting and drawing #' random variates) and probabilistic (analysis of DTMC proprieties) analysis #' #' @author #' Giorgio Alfredo Spedicato #' Maintainer: Giorgio Alfredo Spedicato #' @references Discrete-Time Markov Models, Bremaud, Springer 1999 #' @keywords package #' #' @examples #' # create some markov chains #' statesNames=c("a","b") #' mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, #' nrow=2, dimnames=list(statesNames,statesNames))) #' #' statesNames=c("a","b","c") #' mcB<-new("markovchain", states=statesNames, transitionMatrix= #' matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, #' byrow=TRUE, dimnames=list(statesNames, statesNames))) #' #' statesNames=c("a","b","c","d") #' matrice<-matrix(c(0.25,0.75,0,0,0.4,0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) #' mcC<-new("markovchain", states=statesNames, transitionMatrix=matrice) #' mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #' #' #' #operations with S4 methods #' mcA^2 #' steadyStates(mcB) #' absorbingStates(mcB) #' markovchainSequence(n=20, markovchain=mcC, include=TRUE) "_PACKAGE" #' @useDynLib markovchain, .registration = TRUE #' @import igraph #' @import Matrix #' @import methods #' @import parallel #' @importFrom utils packageDescription #' @importFrom Rcpp evalCpp #' @importFrom RcppParallel RcppParallelLibs #' @importFrom stats4 plot summary #' @importFrom expm %^% logm #' @importFrom stats sd rexp chisq.test pchisq predict aggregate #' @importFrom grDevices colors NULLmarkovchain/R/supplementaryPlot.R0000644000176200001440000000541615137702633016664 0ustar liggesusers# plot a diagram using diagram for a markovchain object .plotdiagram <- function(object, ...) 
{ if(is(object,"markovchain")){ mat <- object@transitionMatrix list <- .communicatingClassesRcpp(object) sections <- length(list) colorList <- grDevices::colors() colorList <- sample(colorList,sections) colorvector <- rep("white",length(object@states)) for(i in 1:length(list)){ part <- list[[i]] for(j in 1:length(part)){ colorvector[match(part[j],object@states)] <- colorList[i] } } } else if(is(object,"ctmc")){ mat <- object@generator colorvector <- rep("white",length(object@states)) } if(object@byrow == FALSE) { mat <- t(mat) } if (!requireNamespace("diagram", quietly = TRUE)) { print("The diagram package is not available") } else{ diagram::plotmat(t(mat),relsize = 0.75,box.col = colorvector, ...) } } # plot a diagram using DiagrammeR for a markovchain object .plotDiagrammeR <- function(object, ...) { if(is(object,"markovchain")){ mat <- object@transitionMatrix } else if(is(object,"ctmc")){ mat <- object@generator } names <- rownames(mat) # names of nodes nodes <- '' for(i in 1:nrow(mat)) { nodes <- paste0(nodes, names[i], "; ") } # store edges edges <- '' for(i in 1:nrow(mat)) { for(j in 1:ncol(mat)) { edges <- paste0(edges, names[i], "->", names[j], " [label = ", mat[i,j], "] ") } } # extract extra parameter dots <- list(...) args <- "" for(name in names(dots)) { args <- paste0(args, name, "=\"", dots[[name]], "\" ") } # print(args) if (requireNamespace("DiagrammeR", quietly = TRUE)) { res <- DiagrammeR::grViz(paste0(" digraph circles { graph [overlap = true, fontsize = 10] node [shape = circle, fixedsize = true, width = 0.9] // sets as circles ", nodes, " ", edges, args," // labelfontsize = 20 labelloc='t' label ='Weather transition matrix' } ")) return (res) } else { print("Diagrammer unavailable") } } # How to do plotting? 
# mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), # transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, # 0.3, 0.4, 0.3, # 0.2, 0.45, 0.35), byrow = T, nrow = 3), # name = "Weather") # mcWeather # .plotdiagram(mcWeather, box.size = 0.06) # .plotDiagrammeR(mcWeather, label ="Weather transition matrix", labelloc = "t") # plot(mcWeather, package = "DiagrammeR", label = "Weather transition matrix")markovchain/R/zzz.R0000644000176200001440000000112315137702633013741 0ustar liggesusers# Author: Giorgio ############################################################################### # loading the markovchain package .onAttach <- function(libname, pkgname) { desc <- packageDescription(pkgname, libname) packageStartupMessage('Package: ', desc$Package, '\n', 'Version: ', desc$Version, '\n', 'Date: ', desc$Date, '\n', 'BugReport: ', desc$BugReports, '\n') } # for unloading dynamic libraries .onUnload <- function (libpath) { library.dynam.unload("markovchain", libpath) }markovchain/R/hommc.R0000644000176200001440000002572015137702633014220 0ustar liggesusers#' An S4 class for representing High Order Multivariate Markovchain (HOMMC) #' #' @slot order an integer equal to order of Multivariate Markovchain #' @slot states a vector of states present in the HOMMC model #' @slot P array of transition matrices #' @slot Lambda a vector which stores the weightage of each transition matrices in P #' @slot byrow if FALSE each column sum of transition matrix is 1 else row sum = 1 #' @slot name a name given to hommc #' #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesName <- c("a", "b") #' #' P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) #' P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) #' P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) #' P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) #' P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) #' #' Lambda <- c(0.8, 0.2, 0.3, 
0.7) #' #' ob <- new("hommc", order = 1, states = statesName, P = P, #' Lambda = Lambda, byrow = FALSE, name = "FOMMC") #' #'@export hommc <- setClass("hommc", slots = list(order = "numeric", states = "character", P = "array", Lambda = "numeric", byrow = "logical", name = "character") ) # internal method to show hommc object in informative way .showHommc <- function(object) { # whether data in transition matrices are stored in column-wise or row-wise fashion if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } # display order and unique states cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") # display transition matrices and the corresponding lambdas n <- object@order s <- sqrt((dim(object@P))[3]/n) for(i in 1:s) { for(j in 1:s) { # t is the index of transition matrix for transition from i sequence to j sequence # order of transition matrices in P is P1{1,1},P2{1,1}..Pn{1,1},P1{1,2}....Pn{s,s} t <- n * s * (i-1) + (j-1) * n for(k in 1:n) { cat("Lambda", k, "(", i, ",", j, ") : ", object@Lambda[t+k],"\n", sep = "") cat("P", k, "(", i, ",", j, ") : \n", sep = "") print(object@P[, , t+k]) cat("\n") } } } } #' @title Function to display the details of hommc object #' @description This is a convenience function to display the slots of hommc object #' in proper format #' #' @param object An object of class hommc #' #' @rdname hommc-show #' @export setMethod("show", "hommc", function(object){ .showHommc(object) } ) # all transition matrices # n*s*s n = order s = number of categorical sequences # verified using two examples from research paper .allTransMat <- function(data, order = 2) { n <- order # order uelement <- sort(unique(as.character(data))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(data) # number of categorical sequence lseq <- ncol(data) # length of each 
categorical sequence # store all transition matrices allTmat <- array(dim = c(length(uelement), length(uelement), n*s*s), dimnames = list(uelement, uelement)) t <- 1 # help for(i in 1:s) { for(j in 1:s) { x <- data[j, ] # jth sequence y <- data[i, ] # ith sequence # jumps for(h in 1:n) { # column wise allTmat[ , , t] <- t(createSequenceMatrix(matrix(c(x[1:(lseq-h)], y[-(1:h)]), ncol = 2, byrow = FALSE), toRowProbs = TRUE, possibleStates = uelement, sanitize = TRUE)) t <- t + 1 } } } return(allTmat) } # distribution of each categorical sequence based on the frequency # verified using two examples from research paper .allFreqProbMat <- function(data) { uelement <- sort(unique(as.character(data))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(data) # number of categorical sequence # frequency based probability for all sequences freqMat <- array(0, dim = c(m, 1, s), dimnames = list(uelement)) for(i in 1:s) { idata <- data[i, ] # ith categorical sequence # populate frequency matrix for(j in idata) { freqMat[j, 1, i] <- freqMat[j, 1, i] + 1 } # normalization freqMat[, , i] <- freqMat[, , i] / sum(freqMat[, , i]) } return(freqMat) } # objective function to pass to solnp .fn3 <- function(params, ...) { hdata <- list(...) 
# calculate error error <- 0 # number of categorical sequence s <- hdata$s # order n <- hdata$n # number of uniq states || dimension of t-matrix m <- hdata$m # array of transition matrices allTmat <- hdata$allTmat # all frequency matrix freqMat <- hdata$freqMat # norm Norm <- hdata$Norm for(i in 1:s) { helper <- matrix(0, nrow = m*n, ncol = 1) for(j in 1:s) { helper2 <- matrix(0, nrow = m, ncol = 1) y <- n * (j - 1 + s * (i - 1)) for(k in 1:n) { helper2 <- helper2 + params[y + k] * (allTmat[ , , y + k] %*% matrix(freqMat[ , , j])) } helper[1:m, ] <- helper[1:m, ] + helper2 if(i == j && n>= 2) { for(k in 2:n) { p <- (k - 1) * m helper[(p + 1):(p + m)] <- freqMat[ , , j] } } } error <- error + sum(abs((helper - freqMat[ , , i]) ^ Norm)) } return(error ^ (1 / Norm)) } # equality constraint function to pass to solnp .eqn3 <- function(params, ...) { hdata <- list(...) # number of categorical sequence s <- hdata$s # order n <- hdata$n toReturn <- numeric() for(i in 1:s) { toReturn[i] <- sum(params[((i - 1) * n * s + 1):(i * n * s)]) } return(toReturn) } #' Function to fit Higher Order Multivariate Markov chain #' #' @description Given a matrix of categorical sequences it fits #' Higher Order Multivariate Markov chain. #' #' @param seqMat a matrix or a data frame where each column #' is a categorical sequence #' @param order Multivariate Markov chain order. Default is 2. #' @param Norm Norm to be used. Default is 2. #' #' @return an hommc object #' #' @examples #' data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1', #' c('2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3')), #' ncol = 2, byrow = FALSE) #' #' fitHighOrderMultivarMC(data, order = 2, Norm = 2) #' #' @references W.-K. Ching et al. 
/ Linear Algebra and its Applications #' #' @author Giorgio Spedicato, Deepak Yadav #' #' @export fitHighOrderMultivarMC <- function(seqMat, order = 2, Norm = 2) { if (requireNamespace("Rsolnp", quietly = TRUE)) { message("This function is experimental") if(is.data.frame(seqMat) == TRUE) { seqMat <- as.matrix(seqMat) } seqMat <- t(seqMat) # array of transition matrices allTmat <- .allTransMat(seqMat, order = order) # array of freq probability freqMat <- .allFreqProbMat(seqMat) n <- order # order uelement <- sort(unique(as.character(seqMat))) # unique element m <- length(uelement) # dim of trans-matrix s <- nrow(seqMat) # number of categorical sequence lmbda <- rep(1 / (n * s), n * s * s) fit <- Rsolnp::solnp(pars = lmbda, fun = .fn3, eqfun = .eqn3, eqB = rep(1, s), LB = rep(0, n * s * s), control = list(trace = 0), allTmat = allTmat, freqMat = freqMat, n = n, m = m, s = s, Norm = Norm) return(new("hommc", order = order, Lambda = fit$pars, P = allTmat, states = uelement, byrow = FALSE)) } else { print("Rsolnp unavailable") return(NULL) } } #' Simulate a higher order multivariate markovchain #' #' @description #' This function provides a prediction of states for a higher order #' multivariate markovchain object #' #' @usage predictHommc(hommc,t,init) #' #' @param hommc a hommc-class object #' @param t no of iterations to predict #' @param init matrix of previous states size of which depends on hommc #' #' @details #' The user is required to provide a matrix of giving n previous coressponding #' every categorical sequence. Dimensions of the init are s X n, where s is #' number of categorical sequences and n is order of the homc. #' #' @return #' The function returns a matrix of size s X t displaying t predicted states #' in each row coressponding to every categorical sequence. 
#' #' @author Vandit Jain #' #' #' @export predictHommc <- function(hommc, t, init) { ## order of markovchain n <- hommc@order ## number of categorical sequences s <- sqrt((dim(hommc@P))[3]/n) ## list of states states <- hommc@states ## size of set of all possible states m <- length(states) ## if initial states not provided take statndard example if(missing(init)) { init <- matrix(rep(states[1],s*n),nrow = s,byrow = TRUE) } if(!all(dim(init) == c(s,n))){ stop("Please provide sufficient number of previous states") } if(!is(hommc, "hommc")) { stop("Please provide a valid hommc-class object") } if(t <=0) stop("T should be a positive integer") for(i in 1:s) { for(j in 1:n) { if(!(init[i,j] %in% states)) stop("invalid states in provided state matrix init") } } ## initialize result matrix result <- matrix(NA,nrow = s,ncol = t) ## runs loop according to hommc class structure for(i in 1:t) { for(j in 1:s) { ## initialises probability according rowProbs <- rep(0,m) ## probability for current sequence depends all sequence for(k in 1:s) { ## gets index of coressponding in the 3-D array P # index is the index of transition matrix for transition from i sequence to j sequence # order of transition matrices in P is P1{1,1},P2{1,1}..Pn{1,1},P1{1,2}....Pn{s,s} index <- n * s * (j-1) + n * (k-1) ## iterates for all order 1 to n for(h in 1:n) { prev <- init[j,n-h+1] label <- which(prev == states) rowProbs <- rowProbs + hommc@Lambda[h + index] * hommc@P[label, ,h + index] } } ## uses sample function from base package curr <- sample(size = 1, x = states, prob = rowProbs) ## changes init for next t iteration for(temp in 2:n) { if(temp <= n) init[j,temp-1] = init[j,temp] } init[j,n] = curr; result[j,i] = curr; } } ## returns result return(result) } markovchain/R/random.R0000644000176200001440000000235115137702633014370 0ustar liggesusers# Methods to generate random markov chains normalizeMatrix <- function(matrix, byrow = TRUE) { margin <- ifelse (byrow, 1, 2) n <- nrow(matrix) result <- 
sapply(1:n, function(i) { row <- matrix[i, ] rowSum <- sum(row) if (rowSum == 0) { values <- c(rep(0, i - 1), 1, rep(0, n - i)) values } else { row / rowSum } }) # If we want the result by rows, we have to transpose the matrix, # since the apply method with margin = 1 (over rows) returns the result # by columns if (byrow) t(result) else result } # Returns a random stochastic matrix randomStochasticMatrix <- function(n, zeroProb, byrow = TRUE) { numRandom <- n * n randomNums <- stats::runif(numRandom) remainProb <- (1 - zeroProb) / numRandom probs <- c(zeroProb, rep(remainProb, numRandom)) entries <- sample(c(0, randomNums), numRandom, prob = probs, replace = TRUE) result <- matrix(entries, n, n, byrow) result <- normalizeMatrix(result, byrow) result } randomMarkovChain <- function(n, zeroProb = 0.95, byrow = TRUE) { matrix <- randomStochasticMatrix(n, zeroProb, byrow) new("markovchain", transitionMatrix = matrix, byrow = byrow) }markovchain/R/statisticalTests.R0000644000176200001440000004317715137702633016472 0ustar liggesusers#helper functions #helper function for checkMP .findNijPjk<-function(Nijk = Nijk, Nij = Nij, trans, row = 1){ i <- Nijk[row,1] j <- Nijk[row,2] k <- Nijk[row,3] fromCh <- as.character(j) toCh <- as.character(k) Pjk <- trans[fromCh,toCh] m1 <- which(Nij[, 1] == i) m2 <- which(Nij[, 2] == j) m <- c(m1, m2) return(Nij[m[anyDuplicated(m)], 3] * Pjk) } #' @name verifyMarkovProperty #' #' @rdname statisticalTests #' @family statisticalTests #' #' @title Various functions to perform statistical inference of DTMC #' @description These functions verify the Markov property, assess #' the order and stationarity of the Markov chain. #' #' @param sequence An empirical sequence. #' @param verbose Should test results be printed out? #' @param nblocks Number of blocks. #' #' @return Verification result #' #' @references Anderson and Goodman. 
#' #' @author Tae Seung Kang, Giorgio Alfredo Spedicato #' #' @seealso \code{markovchain} #' #' @examples #' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", #' "a", "b", "a", "a", "b", "b", "b", "a") #' mcFit <- markovchainFit(data = sequence, byrow = FALSE) #' verifyMarkovProperty(sequence) #' assessOrder(sequence) #' assessStationarity(sequence, 1) #' #' #' @export # check if the sequence holds the Markov property verifyMarkovProperty <- function(sequence, verbose = TRUE) { #warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") #fitting the markovchain transMatrix <- markovchainFit(data = sequence)$estimate@transitionMatrix #make the (n-2)x3 matrix for observations subSample<-sequence[1:(length(sequence) - (length(sequence)%%3))] seqSet1<-matrix(c(subSample[1:(length(subSample) - 2)], subSample[2:(length(subSample) - 1)], subSample[3:(length(subSample))] ),ncol = 3) #fill the matrix in reverse order so position 11 is the first obersvation,12 second and 13 third #compute row frequencies temp<-as.data.frame(seqSet1) Nijk<-aggregate(temp, by = temp, length)[1:(ncol(temp) + 1)] seqSet2 <- seqSet1[, -3] #make matrix of couples temp2 <- as.data.frame(seqSet2) Nij <- aggregate(temp2, by = temp2, length)[1:(ncol(temp2) + 1)] #rowfrequencies included test<-c(length = dim(Nijk)[1]) #compute the test statistic invisible(lapply(seq_len(dim(Nijk)[1]),function(i) { foundNijPjk <- .findNijPjk(Nijk = Nijk, Nij = Nij, trans = transMatrix, row = i) test[i] <<- ((Nijk[i,4]-foundNijPjk)^2)/foundNijPjk }) ) statistic <- sum(test) #return value of the test statistic and test at confience level 95% and 99% #dof #Steps : No. of df = No. of triplets - No. of doubles + No. 
of observations - 1 #Creating vector of doubles/pairs doubles = numeric(length(sequence)-1) for(i in 1:(length(doubles))) {doubles[i] = paste(sequence[i], sequence[i+1], sep="", collapse = NULL)} #Creating vector of triplets triples = numeric(length(sequence)-2) for(i in 1:(length(triples))) {triples[i] = paste(sequence[i], sequence[i+1], sequence[i+2], sep="", collapse = NULL)} #Hence no. of df is--- dof = length(unique(triples)) - length(unique(doubles)) + length(unique(sequence)) - 1 pvalue <- 1-pchisq(q = statistic,df = dof) out <- list(statistic = statistic,dof = dof,p.value = pvalue) if (verbose == TRUE) { cat("Testing markovianity property on given data sequence\n") cat("Chi - square statistic is:", statistic, "\n") cat("Degrees of freedom are:", dof, "\n") cat("And corresponding p-value is:", pvalue, "\n") } invisible(out) } #' @rdname statisticalTests #' @export # check if sequence is of first order or of second order assessOrder <- function(sequence, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") # length of sequence n <- length(sequence) # unique states states <- unique(sequence) # number of unique states nelements <- length(states) TStat <- 0 for(present in states) { # going to be a transition matrix mat <- zeros(nelements) dimnames(mat) <- list(states, states) # populate transition matrix for(i in 1:(n - 2)) { if(present == sequence[i + 1]) { past <- sequence[i] future <- sequence[i + 2] mat[past, future] <- mat[past, future] + 1 } } # chi-squared test res <- chisq.test(mat) TStat <- TStat + res$statistic } k <- nelements df <- k * (k - 1)^2 pvalue <- 1-pchisq(q = TStat, df) out <- list(statistic = TStat[[1]], p.value = pvalue[[1]]) # returning the output if (verbose == TRUE) { cat("The assessOrder test statistic is: ", TStat, "\n") cat("The Chi-Square d.f. 
are: ", df, "\n") cat("The p-value is: ", pvalue, "\n") } invisible(out) } #' @rdname statisticalTests #' @export # check if sequence is stationary assessStationarity <- function(sequence, nblocks, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") # length of sequence n <- length(sequence) # size of each block blocksize <- n / nblocks # vector of unique states states <- unique(sequence) # number of states nstates <- length(states) # sum of the statistics TStat <- 0 # chi-squared test for each state for(i in states) { # init matrix mat <- matrix(0,nblocks, nstates) dimnames(mat) <- list(1:nblocks, states) # compute the transition matrix from sequence for(j in 1:(n - 1)) { if(sequence[j] == i) { # row index b <- ceiling(j / blocksize) # next state future <- sequence[j+1] # update transition matrix mat[b, future] <- mat[b, future] + 1 } } # vector to store row sum of matrix rowsums <- rowSums(mat) # store the indices with zero row sum indices <- which(rowsums == 0) # update rows with zero sum for(k in indices) mat[k, ] <- 1/nstates # update row sum after checking zero sum row rowsums <- rowSums(mat) # row-wise normalize. mat <- mat/rowsums # Some columns may still be all zeros. This causes NaN for chi-squared test. # chi-squared test res <- chisq.test(mat) TStat <- TStat + res$statistic } k <- nstates # degree of freedom df <- k * (nblocks - 1) * (k - 1) pvalue <- 1 - pchisq(TStat, df) # returning the output if (verbose==TRUE) { cat("The assessStationarity test statistic is: ", TStat, "\n") cat("The Chi-Square d.f. 
are: ", df, "\n") cat("The p-value is: ", pvalue, "\n") } out <- list(statistic = TStat[[1]], p.value = pvalue[[1]]) invisible(out) } # sequence to transition frequencey matrix .seq2mat <- function(sequence) { # basic requirement to create transition matrix n <- length(sequence) states <- unique(sequence) nstates <- length(states) # create transition matrix mat <- zeros(nstates) dimnames(mat) <- list(states, states) # populate transition matrix for(i in 1:(n - 1)) { from <- sequence[i] to <- sequence[i+1] mat[from, to] <- mat[from, to] + 1 } return (mat) } #' @title test whether an empirical transition matrix is compatible to a theoretical one #' #' @description This function tests whether an empirical transition matrix is statistically compatible #' with a theoretical one. It is a chi-square based test. In case a cell in the empirical transition matrix is >0 #' that is 0 in the theoretical transition matrix the null hypothesis is rejected. #' #' @rdname statisticalTests #' @family statisticalTests #' #' @param data matrix, character or list to be converted in a raw transition matrix #' @param object a markovchain object #' #' @return a list with following slots: statistic (the chi - square statistic), dof (degrees of freedom), and corresponding p-value. In case a cell in the empirical transition matrix is >0 #' that is 0 in the theoretical transition matrix the null hypothesis is rejected. In that case a p-value of 0 and statistic and dof of NA are returned. 
#' @export #' #' @examples #' #' #Example taken from Kullback Kupperman Tests for Contingency Tables and Markov Chains #' #' sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, #' 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, #' 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, #' 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, #' 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) #' #' mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) #' rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") #' #' verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) #' verifyEmpiricalToTheoretical <- function(data, object, verbose = TRUE) { #warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") if (!is(object,'markovchain') ) stop("Error! Object should belong to the markovchain class") if (missing(data) | missing(object)) stop("Error! Required inputs missing") if ( !is.numeric(data) || is.character(data) || is.matrix(data)) stop("Error! Data should be either a raw transition matrix or either a character or a numeric element") if (is.numeric(data) || is.character(data) ) data<-createSequenceMatrix(stringchar = data, possibleStates = states(object)) if (length(setdiff(names(data),names(object))) > 0) stop("Error! 
Empirical and theoretical tm have different support") # (possibly rearrange columns and rownames) data <- data[match(rownames(data),names(object)),] #matching rows data <- data[,match(colnames(data),names(object))] #matching cols if (sum((data == 0) == (object@transitionMatrix == 0)) == (nrow(data) * ncol(data))) { f_i_dot <-colSums(data) statistic <- 0 for (i in 1:dim(object)) { for (j in 1:dim(object)) { if (data[i, j]>0&object[i, j]>0) statistic <- statistic + data[i, j]*log(data[i, j]/(f_i_dot[i]*object[i, j])) } } statistic <- statistic * 2 null_elements <- sum(object@transitionMatrix == 0) dof <- dim(object) * (dim(object) - 1) - null_elements #r(r-1) - c, c null element ob objects p.value <- 1 - pchisq(q = statistic,df = dof) if (verbose == TRUE) { cat("Testing whether the\n");print(data);cat("transition matrix is compatible with\n");print(object@transitionMatrix);print("theoretical transition matrix") cat("ChiSq statistic is",statistic,"d.o.f are",dof,"corresponding p-value is",p.value,"\n") } out <- list(statistic = statistic, dof = dof,pvalue = p.value) } else { statistic <- NA dof <- NA p.value <- 0 if (verbose == TRUE) { cat("Testing whether the\n");print(data);cat("transition matrix is compatible with\n");print(object@transitionMatrix);print("theoretical transition matrix") cat("ChiSq statistic is",statistic,"d.o.f are",dof,"corresponding p-value is",p.value,"\n") cat("At least one transition is >0 in the data that is 0 in the object. Therefore the null hypothesis is rejected. \n") } out <- list(statistic = statistic, dof = dof,pvalue = p.value) } #return output return(out) } .checkMatrix4Homogeneity<-function(matr) { out<-TRUE if (length(colnames(matr)) == 0) {message("Error! No colnames in input matrix"); out = FALSE} if (length(rownames(matr)) == 0) {message("Error! No rownames in input matrix"); out = FALSE} if (!all.equal(rownames(matr),colnames(matr))) {message("Error! Colnames <> Rownames")} if (any(matr<0)) {message("Error! 
Negative elements"); out = FALSE} return(out) } .addNamedColumns <- function(matr, fullnames) { if ( length( setdiff(names(matr),fullnames) )>0) stop("Error! Names in matr not in fullnames") fullnames<-sort(fullnames) newMatr<-matrix(0,nrow = length(fullnames),ncol = length(fullnames),dimnames = list(fullnames,fullnames)) current_support = colnames(matr) current_dim = dim(matr) for (i in 1:current_dim[1]) { #cycle on row for (j in 1:current_dim[2]) { #cycle on cols item<-matr[i,j] #take the element which_row_trans<-current_support[i] #define current row and cols which_col_trans<-current_support[j] # lookup element in the pooled table row_to_write <-match(x=which_row_trans,table = fullnames) col_to_write <-match(x=which_col_trans,table = fullnames) # write element into the pooled table newMatr[row_to_write,col_to_write] <- newMatr[row_to_write,col_to_write] + item } } return(newMatr) } #' @title Verify Homogeneity across transition matrices #' #' @description Verifies that the s elements in the input list belongs to the same DTMC #' #' @rdname statisticalTests #' @family statisticalTests #' #' @param inputList A list of items that can coerced to transition matrices #' #' @return a list of transition matrices? #' @export #' #' @examples #' #' data(kullback) #' verifyHomogeneity(inputList=kullback,verbose=TRUE) #' verifyHomogeneity<-function(inputList, verbose = TRUE) { warning("The accuracy of the statistical inference functions has been questioned. It will be thoroughly investigated in future versions of the package.") if (!is.list(inputList) ) stop("Error! inputList should be a string") if (length(inputList) < 2) stop("Error! inputList length lower than 2") #checks whether all inputs can be put as transition matrices for (i in 1:length(inputList)) { if (is.matrix(inputList[[i]]) == TRUE) { checks<-.checkMatrix4Homogeneity(inputList[[i]]) if (!checks) stop("Error! 
Element ", i, " to be checked") } else { inputList[[i]]<-createSequenceMatrix(stringchar = inputList[[i]]) #convert all elements into transition matrices } } # create the pooled raw transition matrix and the matrix of rowsums all.names<-character() for (i in 1:length(inputList)) { all.names<-c(all.names, rownames(inputList[[i]])) } all.names<-sort(unique(all.names)) ##initialize PooledRawTransitionMatrix <- matrix(0,nrow = length(all.names),ncol = length(all.names),dimnames = list(all.names, all.names)) RowSumsMatrix <- matrix(0, nrow = length(inputList),ncol=length(all.names),dimnames = list(1:length(inputList),all.names)) ##sum for each element in the list for (k in 1:length(inputList)) { current_support = rownames(inputList[[k]]) current_dim = dim(inputList[[k]]) for (i in 1:current_dim[1]) { #cycle on row for (j in 1:current_dim[2]) { #cycle on cols num_trans<-inputList[[k]][i, j] #take the element which_row_trans <- current_support[i] #define current row and cols which_col_trans <- current_support[j] # lookup element in the pooled table row_to_write <-match(x = which_row_trans,table = all.names) col_to_write <-match(x = which_col_trans,table = all.names) # write element into the pooled table PooledRawTransitionMatrix[row_to_write,col_to_write]=PooledRawTransitionMatrix[row_to_write,col_to_write]+num_trans } } } #create the matrix of rowsums fij. 
for (k in 1:length(inputList)) { my_row_sums <- rowSums(inputList[[k]]) current_support = names(my_row_sums) for (i in 1:length(current_support)) { my_element<-my_row_sums[i] col_to_write<-match(x=current_support[i],table = all.names) RowSumsMatrix[k, col_to_write]<-RowSumsMatrix[k, col_to_write] + my_element } } # compute the chi - square statistic statistic <- 0 # degreesOfFreedomLess <- 0 newInputList <- lapply(inputList, .addNamedColumns,fullnames = all.names) number_of_transitions <- sapply(newInputList,sum) total_transitions <- sum(number_of_transitions) for (s in 1:length(inputList)) { #cycle across inputs for (j in 1:length(all.names)) { #cycle across rows for (k in 1:length(all.names)) { #cycle across cols if (any(newInputList[[s]][j,k] == 0, number_of_transitions[s] == 0, PooledRawTransitionMatrix[j,k] == 0)) { statistic <- statistic + 0 # zero element in log expr does not contribute to statistics # degreesOfFreedomLess <- degreesOfFreedomLess +1 } else { statistic <- statistic + newInputList[[s]][j, k] * log((total_transitions*newInputList[[s]][j, k])/(number_of_transitions[s]*PooledRawTransitionMatrix[j,k])) } } } } statistic <- statistic * 2 #dof (s-1)*(r^2-1)-#zeros degrees_of_freedom <- (length(inputList) - 1)*(length(all.names)^2 - 1)#-degreesOfFreedomLess p.value <- 1 - pchisq(q = statistic,df = degrees_of_freedom) if (verbose == TRUE) { cat("Testing homogeneity of DTMC underlying input list \n") cat("ChiSq statistic is",statistic,"d.o.f are",degrees_of_freedom,"corresponding p-value is",p.value,"\n") } #return output out <- list(statistic = statistic, dof = degrees_of_freedom,pvalue = p.value) return(out) } markovchain/R/classesAndMethods.R0000644000176200001440000012366715137702633016532 0ustar liggesusers#' @title Markov Chain class #' @name markovchain-class #' @aliases markovchain-class *,markovchain,markovchain-method #' *,markovchain,matrix-method *,markovchain,numeric-method #' *,matrix,markovchain-method *,numeric,markovchain-method #' 
==,markovchain,markovchain-method !=,markovchain,markovchain-method #' absorbingStates,markovchain-method transientStates,markovchain-method #' recurrentStates,markovchain-method transientClasses,markovchain-method #' recurrentClasses,markovchain-method communicatingClasses,markovchain-method #' steadyStates,markovchain-method meanNumVisits,markovchain-method #' is.regular,markovchain-method is.irreducible,markovchain-method #' is.accessible,markovchain,character,character-method #' is.accessible,markovchain,missing,missing-method #' absorptionProbabilities,markovchain-method #' meanFirstPassageTime,markovchain,character-method #' meanFirstPassageTime,markovchain,missing-method #' meanAbsorptionTime,markovchain-method #' meanRecurrenceTime,markovchain-method #' conditionalDistribution,markovchain-method hittingProbabilities,markovchain-method #' canonicForm,markovchain-method coerce,data.frame,markovchain-method #' coerce,markovchain,data.frame-method coerce,table,markovchain-method #' coerce,markovchain,igraph-method coerce,markovchain,matrix-method #' coerce,markovchain,sparseMatrix-method coerce,sparseMatrix,markovchain-method #' coerce,matrix,markovchain-method coerce,Matrix,markovchain-method #' coerce,msm,markovchain-method #' coerce,msm.est,markovchain-method coerce,etm,markovchain-method #' dim,markovchain-method initialize,markovchain-method #' names<-,markovchain-method plot,markovchain,missing-method #' predict,markovchain-method print,markovchain-method #' show,markovchain-method summary,markovchain-method #' sort,markovchain-method t,markovchain-method #' [,markovchain,ANY,ANY,ANY-method ^,markovchain,numeric-method #' @description The S4 class that describes \code{markovchain} objects. #' #' @param states Name of the states. 
Must be the same of \code{colnames} and \code{rownames} of the transition matrix #' @param byrow TRUE or FALSE indicating whether the supplied matrix #' is either stochastic by rows or by columns #' @param transitionMatrix Square transition matrix #' @param name Optional character name of the Markov chain #' #' @section Creation of objects: #' #' Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}. #' #' @section Methods: #' #' \describe{ #' \item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects} #' \item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication} #' \item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication } #' \item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain} #' \item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication } #' \item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... 
} #' \item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object} #' \item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object} #' \item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object} #' \item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states } #' \item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form } #' \item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}} #' \item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state} #' \item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}} #' \item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} } #' \item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} } #' \item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects } #' \item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects } #' \item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects } #' 
\item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one } #' \item{dim}{\code{signature(x = "markovchain")}: method to get the size} #' \item{names}{\code{signature(x = "markovchain")}: method to get the names of states} #' \item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states} #' \item{initialize}{\code{signature(.Object = "markovchain")}: initialize method } #' \item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects } #' \item{predict}{\code{signature(object = "markovchain")}: predict method } #' \item{print}{\code{signature(x = "markovchain")}: print method. } #' \item{show}{\code{signature(object = "markovchain")}: show method. } #' \item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. } #' \item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. } #' \item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. } #' \item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain } #' \item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. } #' \item{t}{\code{signature(x = "markovchain")}: transpose matrix } #' \item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability } #' } #' #' @references #' A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' @note #' \enumerate{ #' \item \code{markovchain} object are backed by S4 Classes. #' \item Validation method is used to assess whether either columns or rows totals to one. #' Rounding is used up to \code{.Machine$double.eps * 100}. 
If state names are not properly #' defined for a probability \code{matrix}, coercing to \code{markovchain} object leads #' to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator #' overloading has been applied for \eqn{+,*,^,==,!=} operators. #' } #' #' @seealso \code{\link{markovchainSequence}},\code{\link{markovchainFit}} #' #' @examples #' #show markovchain definition #' showClass("markovchain") #' #create a simple Markov chain #' transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) #' simpleMc<-new("markovchain", states=c("a","b"), #' transitionMatrix=transMatr, #' name="simpleMc") #' #power #' simpleMc^4 #' #some methods #' steadyStates(simpleMc) #' absorbingStates(simpleMc) #' simpleMc[2,1] #' t(simpleMc) #' is.irreducible(simpleMc) #' #conditional distributions #' conditionalDistribution(simpleMc, "b") #' #example for predict method #' sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") #' mcFit<-markovchainFit(data=sequence) #' predict(mcFit$estimate, newdata="b",n.ahead=3) #' #direct conversion #' myMc<-as(transMatr, "markovchain") #' #' #example of summary #' summary(simpleMc) #' \dontrun{plot(simpleMc)} #' #' @keywords classes #' #' @export setClass( # Class name "markovchain", # Define the slots slots = list(states = "character", byrow = "logical", transitionMatrix = "matrix", name = "character"), # Set the default values for the slots prototype = list( states = c("a", "b"), byrow = TRUE, transitionMatrix = matrix( data = c(0, 1, 1, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b"))), name = "Unnamed Markov chain") ) # Initializing method for markovchain objects setMethod( "initialize", signature(.Object = "markovchain"), function (.Object, states, byrow, transitionMatrix, name, ...) 
{ # Put the standard markovchain if (missing(transitionMatrix)) { transitionMatrix <- matrix( data = c(0, 1, 1, 0), nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("a", "b"))) } rowNames <- rownames(transitionMatrix) colNames <- colnames(transitionMatrix) # Check names of transition matrix # if all names are missing it initializes them to "1", "2", .... if (all(is.null(rowNames), is.null(colNames)) == TRUE) { if (missing(states)) { numRows <- nrow(transitionMatrix) stateNames <- as.character(seq(1:numRows)) } else { stateNames <- states } rownames(transitionMatrix) <- stateNames colnames(transitionMatrix) <- stateNames # Fix when rownames null } else if (is.null(rowNames)) { rownames(transitionMatrix) <- colNames # Fix when colnames null } else if (is.null(colNames)) { colnames(transitionMatrix) <- rowNames # Fix when different } else if (! setequal(rowNames, colNames)) { colnames(transitionMatrix) <- rowNames } if (missing(states)) states <- rownames(transitionMatrix) if (missing(byrow)) byrow <- TRUE if (missing(name)) name <- "Unnamed Markov chain" callNextMethod( .Object, states = states, byrow = byrow, transitionMatrix = as.matrix(transitionMatrix), name = name, ... ) } ) #' @title Non homogeneus discrete time Markov Chains class #' @name markovchainList-class #' @aliases [[,markovchainList-method dim,markovchainList-method #' predict,markovchainList-method print,markovchainList-method #' show,markovchainList-method #' @description A class to handle non homogeneous discrete Markov chains #' #' @param markovchains Object of class \code{"list"}: a list of markovchains #' @param name Object of class \code{"character"}: optional name of the class #' #' @section Objects from the Class: #' #' A \code{markovchainlist} is a list of \code{markovchain} objects. They can #' be used to model non homogeneous discrete time Markov Chains, when #' transition probabilities (and possible states) change by time. 
#' @section Methods: #' \describe{ #' \item{[[}{\code{signature(x = "markovchainList")}: extract the #' i-th \code{markovchain} } #' \item{dim}{\code{signature(x = "markovchainList")}: number #' of \code{markovchain} underlying the matrix } #' \item{predict}{\code{signature(object = "markovchainList")}: predict #' from a \code{markovchainList} } #' \item{print}{\code{signature(x = "markovchainList")}: prints the list #' of markovchains } #' \item{show}{\code{signature(object = "markovchainList")}: same as \code{print} } #' } #' #' @references #' A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @note #' The class consists in a list of \code{markovchain} objects. #' It is aimed at working with non homogeneous Markov chains. #' #' @seealso \code{\linkS4class{markovchain}} #' @examples #' showClass("markovchainList") #' #define a markovchainList #' statesNames=c("a","b") #' #' mcA<-new("markovchain",name="MCA", #' transitionMatrix=matrix(c(0.7,0.3,0.1,0.9), #' byrow=TRUE, nrow=2, #' dimnames=list(statesNames,statesNames)) #' ) #' #' mcB<-new("markovchain", states=c("a","b","c"), name="MCB", #' transitionMatrix=matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), #' nrow=3, byrow=TRUE)) #' #' mcC<-new("markovchain", states=c("a","b","c","d"), name="MCC", #' transitionMatrix=matrix(c(0.25,0.75,0,0,0.4,0.6, #' 0,0,0,0,0.1,0.9,0,0,0.7,0.3), #' nrow=4, byrow=TRUE) #' ) #' mcList<-new("markovchainList",markovchains=list(mcA, mcB, mcC), #' name="Non - homogeneous Markov Chain") #' #' @keywords classes #' #' @export setClass( "markovchainList", slots = list( markovchains = "list", name = "character") ) # Verifies whether a markovchainList object is valid or not # A markovchainList is valid iff all the slots are markovchain objects # Returns true if the markovchainList is valid, the indexes of the # wrong slots otherwise setValidity( "markovchainList", function(object) { check <- FALSE markovchains <- object@markovchains 
classes <- sapply(markovchains, class) nonMarkovchain <- which(classes != "markovchain") errors <- sapply(nonMarkovchain, function(i) { paste(i, "-th element class is not 'markovchain'") }) if (length(errors) == 0) TRUE else errors } ) # generic method to print out states #' @name states #' #' @title Defined states of a transition matrix #' #' @description This method returns the states of a transition matrix. #' #' @param object A discrete \code{markovchain} object #' @return The character vector corresponding to states slot. #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @author Giorgio Spedicato #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' states(markovB) #' names(markovB) #' #' @rdname states #' #' @export setGeneric("states", function(object) standardGeneric("states")) #' @rdname states #' @title states setMethod( "states", "markovchain", function(object) { object@states } ) #' @title Returns the states for a Markov chain object #' #' @param x object we want to return states for #' #' @rdname names setMethod( "names", "markovchain", function(x) { x@states } ) #' @title Method to retrieve name of markovchain object #' #' @name name #' #' @description This method returns the name of a markovchain object #' #' @param object A markovchain object #' @rdname getName #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' name(markovB) #' #' @export 
setGeneric("name", function(object) standardGeneric("name")) #' @rdname getName setMethod( "name", "markovchain", function(object) { object@name }) #' @title Method to set name of markovchain object #' #' @name name<- #' #' @description This method modifies the existing name of markovchain object #' #' @param object A markovchain object #' @param value New name of markovchain object #' @rdname setName #' @author Giorgio Spedicato, Deepak Yadav #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' name(markovB) <- "dangerous mc" #' #' @export setGeneric("name<-", function(object, value) standardGeneric("name<-")) #' @rdname setName setMethod( "name<-", "markovchain", function(object, value) { object@name <- value object } ) setMethod( "names<-", "markovchain", function(x, value) { rownames(x@transitionMatrix) <- value colnames(x@transitionMatrix) <- value x@states <- value x } ) #' @exportMethod dim setGeneric("dim") # Generic methods to get the dim of a markovchain and markovchainList setMethod( "dim", "markovchain", function(x) { nrow(x@transitionMatrix) } ) setMethod( "dim", "markovchainList", function(x) { length(x@markovchains) } ) # method to set the validity of a markovchain object setValidity( "markovchain", function(object) { errors <- character() transitionMatrix <- object@transitionMatrix states <- object@states if (length(setdiff(states, unique(states))) > 0) { msg <- "Error! States must be unique!" errors <- c(errors, msg) } # Performs a set of checks. If any error arises, it ends up concatenated to errors # Check all values of transition matrix belongs to [0, 1] maybeProbabilities <- sapply(as.numeric(transitionMatrix), .isProbability) if (any(maybeProbabilities) == FALSE) { msg <- "Error! 
Some elements of transitionMatrix are not probabilities" errors <- c(errors, msg) } # Check whether matrix is square matrix or not if (nrow(transitionMatrix) != ncol(transitionMatrix)) { msg <- "Error! transitionMatrix is not a square matrix" errors <- c(errors, msg) } if (!.checkMatrix(transitionMatrix, object@byrow)) { msg <- paste( paste("Error!", ifelse(object@byrow, "Rows", "Cols")), "of transition matrix do not sum to one" ) errors <- c(errors, msg) } # Check whether column names or rows names equal to state names or not if (! setequal(colnames(transitionMatrix), states)) { msg <- "Error! Colnames of transitionMatrix do not match states" errors <- c(errors, msg) } if (! setequal(rownames(transitionMatrix), states)) { msg <- "Error! Rownames of transitionMatrix do not match states" errors <- c(errors, msg) } if (length(errors) > 0) errors else TRUE } ) # generic method to extract transition probability # from state t0 to state t1 #' @name transitionProbability #' @title Function to get the transition probabilities from initial #' to subsequent states. #' @description This is a convenience function to get transition probabilities. #' #' @param object A \code{markovchain} object. #' @param t0 Initial state. #' @param t1 Subsequent state. 
#' #' @references A First Course in Probability (8th Edition), #' Sheldon Ross, Prentice Hall 2010 #' #' @return Numeric Vector #' #' @author Giorgio Spedicato #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' transitionProbability(markovB,"b", "c") #' @rdname transitionProbability #' #' @exportMethod transitionProbability setGeneric("transitionProbability", function(object, t0, t1) standardGeneric("transitionProbability")) #' @rdname transitionProbability setMethod("transitionProbability", "markovchain", function(object, t0, t1) { fromState <- which(object@states == t0) toState <- which(object@states == t1) out <- ifelse(object@byrow == TRUE, object@transitionMatrix[fromState, toState] , object@transitionMatrix[toState, fromState]) return(out) } ) # print, plot and show methods .showInt <- function(object, verbose = TRUE) { # find the direction if (object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } if (verbose == TRUE) { cat(object@name, "\n A ", dim(object), "- dimensional discrete Markov Chain defined by the following states: \n", paste(states(object), collapse=", "), "\n The transition matrix ", direction, " is defined as follows: \n") } print(object@transitionMatrix) cat("\n") } #' @exportMethod show setGeneric("show") # show methods for markovchain and markovchain list objects setMethod("show", "markovchain", function(object){ .showInt(object) } ) setMethod("show", "markovchainList", function(object) { cat(object@name, " list of Markov chain(s)", "\n") for(i in 1:length(object@markovchains)) { cat("Markovchain ",i,"\n") show(object@markovchains[[i]]) } } ) #' @exportMethod print setGeneric("print") # print methods setMethod("print", 
"markovchainList", function(x) show(x)) setMethod("print", "markovchain", function(x){ object <- x .showInt(object, verbose = FALSE) } ) .getNet <- function(object, round = FALSE) { # function to get the absorbency matrix to plot and export to igraph # # Args: # object: a markovchain object # round: boolean to round # # Returns: # # a graph adjacency if (object@byrow == FALSE) { object <- t(object) } matr <- object@transitionMatrix*100 if(round == TRUE) { matr <- round(matr, 2) } net <- graph.adjacency(adjmatrix = matr, weighted = TRUE, mode = "directed") return(net) } getColorVector <- function(object){ list <- .communicatingClassesRcpp(object) sections <- length(list) colorList <- grDevices::colors() colorList <- sample(colorList,sections) colorvector <- rep("white",length(object@states)) for(i in 1:length(list)){ part <- list[[i]] for(j in 1:length(part)){ colorvector[match(part[j],object@states)] <- colorList[i] } } return(colorvector) } #' @exportMethod plot setGeneric("plot") # Plot methods for markovchain objects # plot method from stat5 setMethod("plot", signature(x = "markovchain", y = "missing"), function(x, y, package = "igraph", ...) { switch(package, diagram = { if (requireNamespace("diagram", quietly = TRUE)) { .plotdiagram(object = x, ...) } else { netMc <- .getNet(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, DiagrammeR = { if (requireNamespace("DiagrammeR", quietly = TRUE)) { .plotDiagrammeR(object = x, ...) } else { netMc <- .getNet(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, { netMc <- .getNet(object = x,round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) 
}) } ) ##################################################AS METHODS######################### .checkMatrix <- function(matr, byrow = TRUE, verbose = FALSE) { # firstly, check size if (ncol(matr) != nrow(matr)) { if(verbose) stop("Error! Not a quadratic matrix") return(FALSE) } # secondly, check is stochastic isStochastic <- .isStochasticMatrix(matr, byrow) if (!isStochastic) { if (verbose) stop("Error! Either rows or cols should sum to 1") return(FALSE) } # if all test are passed return(TRUE) } # Internal function to return a markovchain object given a matrix .matrix2Mc <- function(from) { # enforce class matrix from <- as.matrix(from) # whether given matrix is a transition matrix or not # if it is then how probabilities are stored # row-wise or columnwise byrow <- TRUE checkByRows <- .checkMatrix(from, byrow = byrow) if(!checkByRows) { byrow <- FALSE checkByCols <- .checkMatrix(from, byrow = byrow) if(!checkByCols) { #error could be either in rows or in cols if (any(colSums(from) != 1)) cat("columns sums not equal to one are:", which(colSums(from) != 1),"\n") if (any(rowSums(from) != 1)) cat("row sums not equal to one are:", which(rowSums(from) != 1),"\n") stop("Error! Not a transition matrix") } } # extract states names if(byrow) { namesCandidate <- rownames(from) } else { namesCandidate <- colnames(from) } # if states names is not there create it s1, s2, s3, .... 
if(is.null(namesCandidate)) { namesCandidate <- paste("s", 1:nrow(from), sep = "") } # create markovchain object out <- new("markovchain", transitionMatrix = from, states = namesCandidate, byrow = byrow) invisible(out) } #' @exportMethod coerce NULL # coerce matrix to markovchain object using internal method # example: as("some matrix", "markovchain") setAs(from = "matrix", to = "markovchain", def = .matrix2Mc) setAs(from = "Matrix", to = "markovchain", def = .matrix2Mc) # Function to transform a markovchain into a data.frame # Args: # from: a markovchain object # # returns: # a data.frame .mc2Df <- function(from) { # number of rows or columns nr <- nrow(from@transitionMatrix) for(i in 1:nr){ for(j in 1:nr){ t0 <- from@states[i] t1 <- from@states[j] prob <- transitionProbability(object = from, t0 = t0, t1 = t1) #cope with the new default of R 4.0 (5-3-2020) rowDf <- data.frame(t0 = t0, t1 = t1, prob = prob,stringsAsFactors = TRUE ) # go to else part if first row of data frame is generated if(exists("outDf")) { outDf <- rbind(outDf, rowDf) } else { outDf <- rowDf } } } return(outDf) } # method to convert(coerce) from markovchain to data.frame setAs(from = "markovchain", to = "data.frame", def = .mc2Df) # method to find the column which stores transition probability .whichColProb <- function(df) { # column number which stores transition probability out <- 0 # check for validity of data frame if(ncol(df) > 3) { warning("Warning! More than three columns. Only the first three will be used") } if(ncol(df) < 3) { stop("Error! 
Three columns needed") } for(i in 1:ncol(df)) { # when found the first numeric and probability col if((is(df[, i], "numeric")) & (all(sapply(df[, i], .isProbability) == TRUE))) { out <- i break } } return(out) } # Function to convert from a data.frame containing initial, ending # and probability columns to a proper markovchain object # # Args: # from: a data.frame # # Returns: # A markovchain object .df2Mc <- function(from) { statesNames <- unique(from[, 1]) colProb <- .whichColProb(from) # what is the use # transition matrix prMatr <- zeros(length(statesNames)) rownames(prMatr) <- statesNames colnames(prMatr) <- statesNames for(i in 1:nrow(from)) { idRow <- which(statesNames == from[i, 1]) # assume first col from idCol <- which(statesNames == from[i, 2]) # assume second col to prMatr[idRow, idCol] <- from[i, 3] # assume third col t-probability } out <- new("markovchain", transitionMatrix = prMatr) return(out) } # method to convert(coerce) data frame to markovchain object setAs(from = "data.frame", to = "markovchain", def = .df2Mc) # example # data <- data.frame(from = c("a", "a", "b", "b", "b", "b"), # to = c("a", "b", "b", "b", "b", "a")) # # from <- table(data) # .table2Mc(from) .table2Mc <- function(from) { # check whether table has square dimension or not if(dim(from)[1] != dim(from)[2]) { stop("Error! Table is not squared") } # rows ond columns name should be same if(!setequal(rownames(from),colnames(from))) { stop("Error! Rows not equal to coulumns") } temp <- unclass(as.matrix(from)) # make same sequence of col / row fromMatr <- temp[, order(rownames(temp))] # obtain transition matrix outMatr <- fromMatr / rowSums(fromMatr) out <- new("markovchain", states = rownames(temp), transitionMatrix = outMatr, byrow=TRUE) return(out) } # coerce table to markovchain object setAs(from = "table", to = "markovchain", def = .table2Mc) # function from msm to markovchain # msm is a package. Use this package to create msm object. 
# see how to create msm object using ?msm
# Coerce a fitted msm model to a markovchain object.
# Extracts the one-step transition probability matrix via msm::pmatrix.msm.
# Returns NULL (with a diagnostic message) when the msm package is not installed.
.msm2Mc <- function(from) {
  if (requireNamespace(package = 'msm', quietly = TRUE)) {
    temp <- msm::pmatrix.msm(from)
    prMatr <- unclass(as.matrix(temp))
    out <- new("markovchain", transitionMatrix = prMatr)
  } else {
    out <- NULL
    # message() is the idiomatic channel for diagnostics; print() pollutes stdout
    message("msm unavailable")
  }
  return(out)
}

# coerce msm object to markovchain object
setClass("msm")
setAs(from = "msm", to = "markovchain", def = .msm2Mc)

# Coerce an msm.est estimate to a markovchain object.
# Assumes a probability matrix is given, either directly (matrix) or as the
# first element of a list (the central estimate).
.msmest2Mc <- function(from) {
  if (is.matrix(from)) {
    # central estimate supplied directly
    pMatr <- from
  } else if (is.list(from)) {
    # central estimate is the first list element
    pMatr <- from[[1]]
  } else {
    # fail fast with a clear message instead of the obscure
    # "object 'pMatr' not found" error the original produced
    stop("Error! Expected either a matrix or a list")
  }
  out <- new("markovchain", transitionMatrix = as(pMatr, "matrix"))
  return(out)
}

# coerce ms.est to markovchain object
setClass("msm.est")
setAs(from = "msm.est", to = "markovchain", def = .msmest2Mc)

# Coerce an etm multistate-model object to a markovchain object.
# Counts the observed transitions listed in from$trans and normalizes each
# row of the frequency matrix; states that are never left receive a uniform
# conditional distribution.
.etm2Mc <- function(from) {
  # data frame with 'from' and 'to' columns, one row per observed transition
  df <- from$trans
  # names and number of unique states
  elements <- from$state.names
  nelements <- length(elements)
  # frequency matrix of observed transitions
  prMatr <- zeros(nelements)
  dimnames(prMatr) <- list(elements, elements)
  for (i in seq_len(nrow(df))) {
    r <- df[i, ]
    prMatr[r$from, r$to] <- prMatr[r$from, r$to] + 1
  }
  rsums <- rowSums(prMatr)
  zeroRows <- rsums == 0
  # rows with no observed transitions get a uniform distribution; assigning
  # them BEFORE dividing avoids the transient NaN rows the original created
  prMatr[zeroRows, ] <- 1 / nelements
  # matrix / vector division recycles row-wise, normalizing each remaining row
  prMatr[!zeroRows, ] <- prMatr[!zeroRows, , drop = FALSE] / rsums[!zeroRows]
  # create markovchain object
  out <- new("markovchain", transitionMatrix = prMatr)
  return(out)
}

# coerce etm object to markovchain object
setClass("etm")
setAs(from = "etm", to = "markovchain", def = .etm2Mc)

# sparse matrix from Matrix package
# Coerce through a dense matrix, reusing the matrix -> markovchain coercion.
.sparseMatrix2markovchain <- function(from) {
  temp <- as(from, "matrix")
  out <- as(temp, "markovchain")
  return(out)
}
.markovchain2sparseMatrix<-function(from){ temp<-as(from,"matrix") out <- as(temp, "sparseMatrix") return(out) } setAs(from = "sparseMatrix", to = "markovchain", def = .sparseMatrix2markovchain) setAs(from = "markovchain", to = "sparseMatrix", def = .markovchain2sparseMatrix) # functions and methods to return a matrix .mc2matrix <- function(from) { out <- from@transitionMatrix return(out) } # coerce markovchain object to matrix(transition) setAs(from = "markovchain", to = "matrix", def = .mc2matrix) # functions and methods to return a matrix .mc2igraph <- function(from) { # convert the markovchain to data.frame temp <- .mc2Df(from=from) # convert the data frame to igraph graph # need to set only non zero weights out <- graph.data.frame(d=temp[temp$prob>0,]) return(out) } # coerce markovchain object to igraph setClass("igraph") setAs(from = "markovchain", to = "igraph", def = .mc2igraph) #' @exportMethod t setGeneric("t") # transposing method for markovchain objects setMethod("t", "markovchain", function(x) { out <- new("markovchain", byrow = !x@byrow, transitionMatrix = t(x@transitionMatrix)) return(out) } ) #' @exportMethod * setGeneric("*") # function to multiplicate two markov chains # # Args: # e1: first markovchain # e2: second markov chain # # Returns: # if feasible, a markovchain where the transition matrix is e1*e2 setMethod("*", c("markovchain", "markovchain"), function(e1, e2) { # compare states of markovchains if(!setequal(e1@states, e2@states)) { warning("Warning! Different states") } # dimension must be equal if(!setequal(dim(e1@transitionMatrix), dim(e2@transitionMatrix))) { stop("Error! Different size") } # both must be either row wise or col wise if(!(e1@byrow == e2@byrow)) { stop("Error! 
Both transition matrix should be defined either by row or by column") } newStates <- e1@states newTransMatr <- e1@transitionMatrix %*% e2@transitionMatrix byRow <- e1@byrow # multiplicated matrix takes the first matrix's name mcName <- e1@name out<-new("markovchain", states = newStates, transitionMatrix = newTransMatr, byrow = byRow, name = mcName) return(out) } ) # methods implemented for multiplication of markovchain object with # matrix, 1-D vector, and vice-versa setMethod("*", c("matrix", "markovchain"), function(e1, e2) { out <- e1 %*% e2@transitionMatrix return(out) } ) setMethod("*", c("markovchain", "matrix"), function(e1, e2) { out <- e1@transitionMatrix %*% e2 return(out) } ) setMethod("*", c("numeric", "markovchain"), function(e1, e2) { if(length(e1) != dim(e2)) { stop("Error! Uncompatible dimensions") } else { out <- e1 %*% e2@transitionMatrix } return(out) } ) setMethod("*", c("markovchain", "numeric"), function(e1, e2) { if(length(e2) != dim(e1)) { stop("Error! Uncompatible dimensions") } else { out <- e1@transitionMatrix %*% e2 } return(out) } ) #' @exportMethod == setGeneric("==") # compare two markovchain object setMethod("==", c("markovchain", "markovchain"), function(e1, e2) { out <- .approxEqualMatricesRcpp(e1@transitionMatrix, e2@transitionMatrix) return(out) } ) #' @exportMethod != setGeneric("!=") setMethod("!=", c("markovchain", "markovchain"), function(e1, e2) { out <- FALSE out <- !(e1 == e2) return(out) } ) #'@exportMethod ^ setGeneric("^") # markovchain raise to some power # this method is O(n³ log(m)) where n = {num cols (= rows) of e1} and m = e2 setMethod("^", c("markovchain", "numeric"), function(e1, e2) { out <- new("markovchain", states = e1@states, byrow = e1@byrow, transitionMatrix = e1@transitionMatrix %^% e2, name = paste(e1@name, "^", e2, sep = "") ) return(out) } ) #' @exportMethod [ setGeneric("[") # methods to directly access transition matrix elements setMethod("[", signature(x = "markovchain", i = "ANY", j = "ANY"), 
function(x, i, j) { out <- x@transitionMatrix[i, j] return(out) } ) #' @exportMethod [[ setGeneric("[[") # methods to directly access markovchain objects composing a markovchainList object setMethod("[[", signature(x = "markovchainList", i = "ANY"), function(x, i) { out <- x@markovchains[[i]] return(out) } ) # transition probabilty vector from a given state #' @title \code{conditionalDistribution} of a Markov Chain #' #' @name conditionalDistribution #' #' @description It extracts the conditional distribution of the subsequent state, #' given current state. #' #' @param object A \code{markovchain} object. #' @param state Subsequent state. #' #' @author Giorgio Spedicato, Deepak Yadav #' #' @return A named probability vector #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' # define a markov chain #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1),nrow = 3, #' byrow = TRUE, dimnames = list(statesNames, statesNames))) #' #' conditionalDistribution(markovB, "b") #' #' @exportMethod conditionalDistribution setGeneric("conditionalDistribution", function(object, state) standardGeneric("conditionalDistribution")) setMethod("conditionalDistribution", "markovchain", function(object, state) { # get the states names stateNames <- states(object) # number of unique states out <- numeric(length(stateNames)) # states are assumed to be sorted index2Take <- which(stateNames == state) if(object@byrow == TRUE) { out <- object@transitionMatrix[index2Take, ] } else { out <- object@transitionMatrix[, index2Take] } # names the output and returs it names(out) <- stateNames return(out) } ) # Function to get the mode of a probability vector # # Args: # probVector: the probability vector # ties: specifies if ties are to be sampled, otherwise more than one element is returned # 
# Returns: # the name of the model element .getMode <- function(probVector, ties = "random") { maxIndex <- which(probVector == max(probVector)) temp <- probVector[maxIndex] # index of maximum probabilty if((ties == "random") & (length(temp) > 1)) { out <- sample(temp, 1) } else { out <- temp } return(names(out)) } #' @exportMethod predict setGeneric("predict") # predict method for markovchain objects # given initial state return a vector of next n.ahead states setMethod("predict", "markovchain", function(object, newdata, n.ahead = 1) { # identify the last state lastState <- newdata[length(newdata)] out <- character() for(i in 1:n.ahead) { # cyclically determine the most probable subsequent state from the conditional distribution newState <- .getMode(probVector = conditionalDistribution(object, lastState), ties = "random") out <- c(out, newState) lastState <- newState } return(out) } ) # predict method for markovchainList objects setMethod("predict", "markovchainList", function(object, newdata, n.ahead = 1, continue = FALSE) { # object a markovchainList # newdata = the actual data # n.ahead = how much ahead # continue = veryfy if that lasts # allocate output out <- character() actualPos <- length(newdata) lastState <- newdata[actualPos] # take last position for(i in 1:n.ahead) { newPos <- actualPos + i - 1 if(newPos <= dim(object)) { newState <- predict(object = object[[newPos]], newdata = lastState, n.ahead = 1) out <- c(out, newState) lastState <- newState } else { if(continue == TRUE) { newState <- predict(object = object[[dim(object)]], newdata = lastState, n.ahead = 1) out <- c(out, newState) lastState <- newState } else break; } } return(out) } ) #sort method for markovchain objects setGeneric("sort", function(x, decreasing=FALSE, ...) 
standardGeneric("sort")) setMethod("sort", signature(x="markovchain"), function(x, decreasing = FALSE) { #get matrix and state names 2 be sorted matr2besorted<-x@transitionMatrix if (x@byrow) states2besorted <- rownames(matr2besorted) else states2besorted <- colnames(matr2besorted) #sorting sort_index<-order(states2besorted,decreasing = decreasing) #reallocating matr_sorted<-matr2besorted[sort_index,sort_index] states_sorted<-states2besorted[sort_index] out<-x out@transitionMatrix<-matr_sorted out@states<-states_sorted return(out) } ) # method to get stationary states #' @name steadyStates #' @title Stationary states of a \code{markovchain} object #' #' @description This method returns the stationary vector in matricial form of a markovchain object. #' @param object A discrete \code{markovchain} object #' #' @return A matrix corresponding to the stationary states #' #' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 #' @author Giorgio Spedicato #' @seealso \code{\linkS4class{markovchain}} #' #' @note The steady states are identified starting from which eigenvectors correspond #' to identity eigenvalues and then normalizing them to sum up to unity. When negative values are found #' in the matrix, the eigenvalues extraction is performed on the recurrent classes submatrix. 
#' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, #' byrow = TRUE, dimnames=list(statesNames,statesNames)), #' name = "A markovchain Object" #' ) #' steadyStates(markovB) #' #' @rdname steadyStates #' @exportMethod steadyStates setGeneric("steadyStates", function(object) standardGeneric("steadyStates")) markovchain/R/utils.R0000644000176200001440000000156215137702633014253 0ustar liggesusersprecomputeData <- function(mc) { list( object = mc, transitionMatrix = mc@transitionMatrix, states = mc@states, byrow = mc@byrow, irreducible = is.irreducible(mc), regular = is.regular(mc), canonicForm = canonicForm(mc), recurrentClasses = recurrentClasses(mc), transientClasses = transientClasses(mc), recurrentStates = recurrentStates(mc), transientStates = transientStates(mc), absorbingStates = absorbingStates(mc), hittingProbabilities = hittingProbabilities(mc), meanNumVisits = meanNumVisits(mc), meanRecurrenceTime = meanRecurrenceTime(mc), communicatingClasses = communicatingClasses(mc), steadyStates = steadyStates(mc), reachabilityMatrix = is.accessible(mc) ) } precomputeSteadyStates <- function(mc) { list( object = mc, expected = steadyStates(mc) ) }markovchain/R/probabilistic.R0000644000176200001440000007064615137702633015752 0ustar liggesusers# given a markovchain object is it possible to reach goal state from # a given state #' @name is.accessible #' @title Verify if a state j is reachable from state i. #' @description This function verifies if a state is reachable from another, i.e., #' if there exists a path that leads to state j leaving from state i with #' positive probability #' #' @param object A \code{markovchain} object. #' @param from The name of state "i" (beginning state). #' @param to The name of state "j" (ending state). #' #' @details It wraps an internal function named \code{reachabilityMatrix}. 
#' @return A boolean value. #' #' @references James Montgomery, University of Madison #' #' @author Giorgio Spedicato, Ignacio Cordón #' @seealso \code{is.irreducible} #' #' @examples #' statesNames <- c("a", "b", "c") #' markovB <- new("markovchain", states = statesNames, #' transitionMatrix = matrix(c(0.2, 0.5, 0.3, #' 0, 1, 0, #' 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, #' dimnames = list(statesNames, statesNames) #' ) #' ) #' is.accessible(markovB, "a", "c") #' #' @exportMethod is.accessible setGeneric("is.accessible", function(object, from, to) standardGeneric("is.accessible")) setMethod("is.accessible", c("markovchain", "character", "character"), function(object, from, to) { # O(n²) procedure to see if to state is reachable starting at from state return(.isAccessibleRcpp(object, from, to)) } ) setMethod("is.accessible", c("markovchain", "missing", "missing"), function(object, from, to) { .reachabilityMatrixRcpp(object) } ) # a markov chain is irreducible if it is composed of only one communicating class #' @name is.irreducible #' @title Function to check if a Markov chain is irreducible (i.e. ergodic) #' @description This function verifies whether a \code{markovchain} object transition matrix #' is composed by only one communicating class. #' @param object A \code{markovchain} object #' #' @details It is based on \code{.communicatingClasses} internal function. #' @return A boolean values. #' #' @references Feres, Matlab listings for Markov Chains. 
#' @author Giorgio Spedicato #' #' @seealso \code{\link{summary}} #' #' @examples #' statesNames <- c("a", "b") #' mcA <- new("markovchain", transitionMatrix = matrix(c(0.7,0.3,0.1,0.9), #' byrow = TRUE, nrow = 2, #' dimnames = list(statesNames, statesNames) #' )) #' is.irreducible(mcA) #' #' @exportMethod is.irreducible setGeneric("is.irreducible", function(object) standardGeneric("is.irreducible")) setMethod("is.irreducible", "markovchain", function(object) { .isIrreducibleRcpp(object) }) # what this function will do? # It calculates the probability to go from given state # to all other states in k steps # k varies from 1 to n #' @name firstPassage #' @title First passage across states #' @description This function compute the first passage probability in states #' #' @param object A \code{markovchain} object #' @param state Initial state #' @param n Number of rows on which compute the distribution #' #' @details Based on Feres' Matlab listings #' @return A matrix of size 1:n x number of states showing the probability of the #' first time of passage in states to be exactly the number in the row. #' #' @references Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains #' #' @author Giorgio Spedicato #' @seealso \code{\link{conditionalDistribution}} #' #' @examples #' simpleMc <- new("markovchain", states = c("a", "b"), #' transitionMatrix = matrix(c(0.4, 0.6, .3, .7), #' nrow = 2, byrow = TRUE)) #' firstPassage(simpleMc, "b", 20) #' #' @export firstPassage <- function(object, state, n) { P <- object@transitionMatrix stateNames <- states(object) # row number i <- which(stateNames == state) outMatr <- .firstpassageKernelRcpp(P = P, i = i, n = n) colnames(outMatr) <- stateNames rownames(outMatr) <- 1:n return(outMatr) } #' function to calculate first passage probabilities #' #' @description The function calculates first passage probability for a subset of #' states given an initial state. 
#'
#' @param object a markovchain-class object
#' @param state initial state of the process (character vector)
#' @param set set of states A, first passage of which is to be calculated
#' @param n Number of rows on which compute the distribution
#'
#' @return A vector of size n showing the first time probabilities
#' @references
#' Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains;
#' MIT OCW, course - 6.262, Discrete Stochastic Processes, course-notes, chap -05
#'
#' @author Vandit Jain
#'
#' @seealso \code{\link{firstPassage}}
#' @examples
#' statesNames <- c("a", "b", "c")
#' markovB <- new("markovchain", states = statesNames, transitionMatrix =
#'                  matrix(c(0.2, 0.5, 0.3,
#'                           0, 1, 0,
#'                           0.1, 0.8, 0.1), nrow = 3, byrow = TRUE,
#'                         dimnames = list(statesNames, statesNames)
#'                 ))
#' firstPassageMultiple(markovB,"a",c("b","c"),4)
#'
#' @export
firstPassageMultiple <- function(object, state, set, n) {
  # transition matrix and state labels of the chain
  P <- object@transitionMatrix
  stateNames <- states(object)

  # locate the initial state. which() returns integer(0) for an unknown
  # state (never -1), so test the length; the original sentinel comparison
  # `k == -1` could never fire and if() crashed on a zero-length condition.
  k <- which(stateNames == state)
  if (length(k) == 0)
    stop("please provide a valid initial state")

  # map the target set onto state indices in one shot; match() yields NA
  # (not 0) for states that do not belong to the chain
  setno <- match(set, stateNames)
  if (anyNA(setno))
    stop("please provide proper set of states")

  # Rcpp implementation of the first-passage computation
  outMatr <- .firstPassageMultipleRCpp(P, k, setno, n)

  # label the output: one column ("set") and one row per step 1..n
  colnames(outMatr) <- "set"
  rownames(outMatr) <- 1:n
  return(outMatr)
}


#' @name communicatingClasses
#' @rdname structuralAnalysis
#' @aliases transientStates recurrentStates absorbingStates communicatingClasses
#'   transientClasses recurrentClasses
#' @title Various function to perform structural analysis of DTMC
#' @description These functions return absorbing and transient states of the \code{markovchain} objects.
#'
#' @param object A \code{markovchain} object.
#' #' @return #' \describe{ #' \item{\code{period}}{returns a integer number corresponding to the periodicity of the Markov #' chain (if it is irreducible)} #' \item{\code{absorbingStates}}{returns a character vector with the names of the absorbing #' states in the Markov chain} #' \item{\code{communicatingClasses}}{returns a list in which each slot contains the names of #' the states that are in that communicating class} #' \item{\code{recurrentClasses}}{analogously to \code{communicatingClasses}, but with #' recurrent classes} #' \item{\code{transientClasses}}{analogously to \code{communicatingClasses}, but with #' transient classes} #' \item{\code{transientStates}}{returns a character vector with all the transient states #' for the Markov chain} #' \item{\code{recurrentStates}}{returns a character vector with all the recurrent states #' for the Markov chain} #' \item{\code{canonicForm}}{returns the Markov chain reordered by a permutation of states #' so that we have blocks submatrices for each of the recurrent classes and a collection #' of rows in the end for the transient states} #' } #' #' @references Feres, Matlab listing for markov chain. 
#' #' @author Giorgio Alfredo Spedicato, Ignacio Cordón #' #' @seealso \code{\linkS4class{markovchain}} #' #' @examples #' statesNames <- c("a", "b", "c") #' mc <- new("markovchain", states = statesNames, transitionMatrix = #' matrix(c(0.2, 0.5, 0.3, #' 0, 1, 0, #' 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, #' dimnames = list(statesNames, statesNames)) #' ) #' #' communicatingClasses(mc) #' recurrentClasses(mc) #' recurrentClasses(mc) #' absorbingStates(mc) #' transientStates(mc) #' recurrentStates(mc) #' canonicForm(mc) #' #' # periodicity analysis #' A <- matrix(c(0, 1, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 1, 0), #' nrow = 4, ncol = 4, byrow = TRUE) #' mcA <- new("markovchain", states = c("a", "b", "c", "d"), #' transitionMatrix = A, #' name = "A") #' #' is.irreducible(mcA) #true #' period(mcA) #2 #' #' # periodicity analysis #' B <- matrix(c(0, 0, 1/2, 1/4, 1/4, 0, 0, #' 0, 0, 1/3, 0, 2/3, 0, 0, #' 0, 0, 0, 0, 0, 1/3, 2/3, #' 0, 0, 0, 0, 0, 1/2, 1/2, #' 0, 0, 0, 0, 0, 3/4, 1/4, #' 1/2, 1/2, 0, 0, 0, 0, 0, #' 1/4, 3/4, 0, 0, 0, 0, 0), byrow = TRUE, ncol = 7) #' mcB <- new("markovchain", transitionMatrix = B) #' period(mcB) #' #' @exportMethod communicatingClasses setGeneric("communicatingClasses", function(object) standardGeneric("communicatingClasses")) setMethod("communicatingClasses", "markovchain", function(object) { return(.communicatingClassesRcpp(object)) }) # A communicating class will be a recurrent class if # there is no outgoing edge from this class # Recurrent classes are subset of communicating classes #' @rdname structuralAnalysis #' #' @exportMethod recurrentClasses setGeneric("recurrentClasses", function(object) standardGeneric("recurrentClasses")) setMethod("recurrentClasses", "markovchain", function(object) { return(.recurrentClassesRcpp(object)) }) # A communicating class will be a transient class iff # there is an outgoing edge from this class to an state # outside of the class # Transient classes are subset of communicating classes #' 
@rdname structuralAnalysis #' #' @exportMethod transientClasses setGeneric("transientClasses", function(object) standardGeneric("transientClasses")) setMethod("transientClasses", "markovchain", function(object) { return(.transientClassesRcpp(object)) }) #' @rdname structuralAnalysis #' #' @exportMethod transientStates setGeneric("transientStates", function(object) standardGeneric("transientStates")) setMethod("transientStates", "markovchain", function(object) { .transientStatesRcpp(object) } ) #' @rdname structuralAnalysis #' #' @exportMethod recurrentStates setGeneric("recurrentStates", function(object) standardGeneric("recurrentStates")) setMethod("recurrentStates", "markovchain", function(object) { .recurrentStatesRcpp(object) } ) # generic function to extract absorbing states #' @rdname structuralAnalysis #' #' @exportMethod absorbingStates setGeneric("absorbingStates", function(object) standardGeneric("absorbingStates")) setMethod("absorbingStates", "markovchain", function(object) { .absorbingStatesRcpp(object) } ) #' @rdname structuralAnalysis #' #' @exportMethod canonicForm setGeneric("canonicForm", function(object) standardGeneric("canonicForm")) setMethod("canonicForm", "markovchain", function(object) { .canonicFormRcpp(object) } ) #' @title Calculates committor of a markovchain object with respect to set A, B #' #' @description Returns the probability of hitting states rom set A before set B #' with different initial states #' #' @usage committorAB(object,A,B,p) #' #' @param object a markovchain class object #' @param A a set of states #' @param B a set of states #' @param p initial state (default value : 1) #' #' @details The function solves a system of linear equations to calculate probaility that the process hits #' a state from set A before any state from set B #' #' @return Return a vector of probabilities in case initial state is not provided else returns a number #' #' @examples #' transMatr <- matrix(c(0,0,0,1,0.5, #' 0.5,0,0,0,0, #' 0.5,0,0,0,0, 
#'                       0,0.2,0.4,0,0,
#'                       0,0.8,0.6,0,0.5),
#'                     nrow = 5)
#' object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr)
#' committorAB(object,c(5),c(3))
#'
#' @export
committorAB <- function(object, A, B, p = 1) {
  if (!is(object, "markovchain"))
    stop("please provide a valid markovchain object")

  # coefficient matrix of the linear system; renamed from `matrix` to
  # avoid shadowing base::matrix
  coeffs <- object@transitionMatrix
  noofstates <- length(object@states)

  # validate EVERY entry of A and B. The original loops used
  # `for(i in length(A))`, which only inspected the last element.
  if (any(A <= 0 | A > noofstates))
    stop("please provide a valid set A")
  if (any(B <= 0 | B > noofstates))
    stop("please provide a valid set B")

  # A and B must be disjoint for the committor to be well defined
  if (length(intersect(A, B)) > 0)
    stop("intersection of set A and B is not null")

  if (p <= 0 || p > noofstates)
    stop("please provide a valid initial state")

  # the committor q solves (P - I) q = 0 on the free states with boundary
  # conditions q = 1 on A and q = 0 on B
  coeffs <- coeffs - diag(noofstates)

  # replace the rows indexed by A and B with identity rows so those
  # states are pinned to their boundary values
  coeffs[A, ] <- 0
  coeffs[cbind(A, A)] <- 1
  coeffs[B, ] <- 0
  coeffs[cbind(B, B)] <- 1

  # right-hand side of the system coeffs %*% x = b: 1 on A, 0 elsewhere
  b <- rep(0, noofstates)
  b[A] <- 1

  # solve the linear system with base::solve
  out <- solve(coeffs, b)

  # if no initial state was supplied, return probabilities for all states
  if (missing(p))
    return(out)
  return(out[p])
}


#' Expected Rewards for a markovchain
#'
#' @description Given a markovchain object and reward values for every state,
#' function calculates expected reward value after n steps.
#'
#' @usage expectedRewards(markovchain,n,rewards)
#'
#' @param markovchain the markovchain-class object
#' @param n no of steps of the process
#' @param rewards vector depicting rewards coressponding to states
#'
#' @details the function uses a dynamic programming approach to solve a
#' recursive equation described in reference.
#'
#' @return
#' returns a vector of expected rewards for different initial states
#'
#' @author Vandit Jain
#'
#' @references Stochastic Processes: Theory for Applications, Robert G. Gallager,
#' Cambridge University Press
#'
#' @examples
#' transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE)
#' simpleMc<-new("markovchain", states=c("a","b"),
#'              transitionMatrix=transMatr)
#' expectedRewards(simpleMc,1,c(0,1))
#' @export
expectedRewards <- function(markovchain, n, rewards) {
  # transition matrix of the chain; renamed from `matrix` to avoid
  # shadowing base::matrix
  transMatr <- markovchain@transitionMatrix

  # Rcpp backend performs the dynamic-programming recursion
  out <- .expectedRewardsRCpp(transMatr, n, rewards)

  # keep one entry per state and strip any names, reproducing the
  # element-by-element copy loop of the original implementation
  noofStates <- length(states(markovchain))
  result <- unname(out[seq_len(noofStates)])

  # NOTE(review): naming the result by state, as the commented-out line
  # in the original suggested, would be a behavior change — left as is
  #names(result) <- states(markovchain)
  return(result)
}

#' Expected first passage Rewards for a set of states in a markovchain
#'
#' @description Given a markovchain object and reward values for every state,
#' function calculates expected reward value for a set A of states after n
#' steps.
#'
#' @usage expectedRewardsBeforeHittingA(markovchain, A, state, rewards, n)
#'
#' @param markovchain the markovchain-class object
#' @param A set of states for first passage expected reward
#' @param state initial state
#' @param rewards vector depicting rewards coressponding to states
#' @param n no of steps of the process
#'
#' @details The function returns the value of expected first passage
#' rewards given rewards coressponding to every state, an initial state
#' and number of steps.
#' #' @return returns a expected reward (numerical value) as described above #' #' @author Sai Bhargav Yalamanchi, Vandit Jain #' #' @export expectedRewardsBeforeHittingA <- function(markovchain, A, state, rewards, n) { ## gets the markovchain matrix matrix <- markovchain@transitionMatrix # gets the names of states stateNames <- states(markovchain) # no of states S <- length(stateNames) # vectors for states in S-A SAno <- rep(0,S-length(A)) rewardsSA <- rep(0,S-length(A)) # for initialisation for set S-A i=1 ini = -1 for(j in 1:length(stateNames)) { if(!(stateNames[j] %in% A)){ SAno[i] = j rewardsSA[i] = rewards[j] if(stateNames[j] == state) ini = i i = i+1 } } ## get the matrix coressponding to S-A matrix <- matrix[SAno,SAno] ## cals the cpp implementation out <- .expectedRewardsBeforeHittingARCpp(matrix, ini, rewardsSA, n) return(out) } #' Mean First Passage Time for irreducible Markov chains #' #' @description Given an irreducible (ergodic) markovchain object, this function #' calculates the expected number of steps to reach other states #' #' @param object the markovchain object #' @param destination a character vector representing the states respect to #' which we want to compute the mean first passage time. Empty by default #' #' @details For an ergodic Markov chain it computes: #' \itemize{ #' \item If destination is empty, the average first time (in steps) that takes #' the Markov chain to go from initial state i to j. (i, j) represents that #' value in case the Markov chain is given row-wise, (j, i) in case it is given #' col-wise. #' \item If destination is not empty, the average time it takes us from the #' remaining states to reach the states in \code{destination} #' } #' #' @return a Matrix of the same size with the average first passage times if #' destination is empty, a vector if destination is not #' #' @author Toni Giorgino, Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. 
#' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(1 / 10 * c(6,3,1, #' 2,3,5, #' 4,1,5), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) #' meanFirstPassageTime(mc, "r") #' #' #' # Grinstead and Snell's "Oz weather" worked out example #' mOz <- matrix(c(2,1,1, #' 2,0,2, #' 1,1,2)/4, ncol = 3, byrow = TRUE) #' #' mcOz <- new("markovchain", states = c("s", "c", "r"), transitionMatrix = mOz) #' meanFirstPassageTime(mcOz) #' #' @export meanFirstPassageTime setGeneric("meanFirstPassageTime", function(object, destination) { standardGeneric("meanFirstPassageTime") }) setMethod("meanFirstPassageTime", signature("markovchain", "missing"), function(object, destination) { destination = character() .meanFirstPassageTimeRcpp(object, destination) } ) setMethod("meanFirstPassageTime", signature("markovchain", "character"), function(object, destination) { states <- object@states incorrectStates <- setdiff(destination, states) if (length(incorrectStates) > 0) stop("Some of the states you provided in destination do not match states from the markovchain") result <- .meanFirstPassageTimeRcpp(object, destination) asVector <- as.vector(result) names(asVector) <- colnames(result) asVector } ) #' Mean recurrence time #' #' @description Computes the expected time to return to a recurrent state #' in case the Markov chain starts there #' #' @usage meanRecurrenceTime(object) #' #' @param object the markovchain object #' #' @return For a Markov chain it outputs is a named vector with the expected #' time to first return to a state when the chain starts there. #' States present in the vector are only the recurrent ones. If the matrix #' is ergodic (i.e. irreducible), then all states are present in the output #' and order is the same as states order for the Markov chain #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. 
#' #' @examples #' m <- matrix(1 / 10 * c(6,3,1, #' 2,3,5, #' 4,1,5), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) #' meanRecurrenceTime(mc) #' #' @export meanRecurrenceTime setGeneric("meanRecurrenceTime", function(object) { standardGeneric("meanRecurrenceTime") }) setMethod("meanRecurrenceTime", "markovchain", function(object) { .meanRecurrenceTimeRcpp(object) }) #' Mean absorption time #' #' @description Computes the expected number of steps to go from any of the #' transient states to any of the recurrent states. The Markov chain should #' have at least one transient state for this method to work #' #' @usage meanAbsorptionTime(object) #' #' @param object the markovchain object #' #' @return A named vector with the expected number of steps to go from a #' transient state to any of the recurrent ones #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(c(1/2, 1/2, 0, #' 1/2, 1/2, 0, #' 0, 1/2, 1/2), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) #' times <- meanAbsorptionTime(mc) #' #' @export meanAbsorptionTime setGeneric("meanAbsorptionTime", function(object) { standardGeneric("meanAbsorptionTime") }) setMethod("meanAbsorptionTime", "markovchain", function(object) { .meanAbsorptionTimeRcpp(object) }) #' Absorption probabilities #' #' @description Computes the absorption probability from each transient #' state to each recurrent one (i.e. 
the (i, j) entry or (j, i), in a #' stochastic matrix by columns, represents the probability that the #' first not transient state we can go from the transient state i is j #' (and therefore we are going to be absorbed in the communicating #' recurrent class of j) #' #' @usage absorptionProbabilities(object) #' #' @param object the markovchain object #' #' @return A named vector with the expected number of steps to go from a #' transient state to any of the recurrent ones #' #' @author Ignacio Cordón #' #' @references C. M. Grinstead and J. L. Snell. Introduction to Probability. #' American Mathematical Soc., 2012. #' #' @examples #' m <- matrix(c(1/2, 1/2, 0, #' 1/2, 1/2, 0, #' 0, 1/2, 1/2), ncol = 3, byrow = TRUE) #' mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) #' absorptionProbabilities(mc) #' #' @export absorptionProbabilities setGeneric("absorptionProbabilities", function(object) { standardGeneric("absorptionProbabilities") }) setMethod("absorptionProbabilities", "markovchain", function(object) { .absorptionProbabilitiesRcpp(object) }) #' @title Check if a DTMC is regular #' #' @description Function to check wether a DTCM is regular # #' @details A Markov chain is regular if some of the powers of its matrix has all elements #' strictly positive #' #' @param object a markovchain object #' #' @return A boolean value #' #' @author Ignacio Cordón #' @references Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. 
#' Corollary 8.5.8, Theorem 8.5.9 #' #' #' @examples #' P <- matrix(c(0.5, 0.25, 0.25, #' 0.5, 0, 0.5, #' 0.25, 0.25, 0.5), nrow = 3) #' colnames(P) <- rownames(P) <- c("R","N","S") #' ciao <- as(P, "markovchain") #' is.regular(ciao) #' #' @seealso \code{\link{is.irreducible}} #' #' @exportMethod is.regular setGeneric("is.regular", function(object) standardGeneric("is.regular")) setMethod("is.regular", "markovchain", function(object) { .isRegularRcpp(object) }) #' Hitting probabilities for markovchain #' #' @description Given a markovchain object, #' this function calculates the probability of ever arriving from state i to j #' #' @usage hittingProbabilities(object) #' #' @param object the markovchain-class object #' #' @return a matrix of hitting probabilities #' #' @author Ignacio Cordón #' #' @references R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 #' #' @examples #' M <- markovchain:::zeros(5) #' M[1,1] <- M[5,5] <- 1 #' M[2,1] <- M[2,3] <- 1/2 #' M[3,2] <- M[3,4] <- 1/2 #' M[4,2] <- M[4,5] <- 1/2 #' #' mc <- new("markovchain", transitionMatrix = M) #' hittingProbabilities(mc) #' #' @exportMethod hittingProbabilities setGeneric("hittingProbabilities", function(object) standardGeneric("hittingProbabilities")) setMethod("hittingProbabilities", "markovchain", function(object) { .hittingProbabilitiesRcpp(object) }) #' Mean num of visits for markovchain, starting at each state #' #' @description Given a markovchain object, this function calculates #' a matrix where the element (i, j) represents the expect number of visits #' to the state j if the chain starts at i (in a Markov chain by columns it #' would be the element (j, i) instead) #' #' @usage meanNumVisits(object) #' #' @param object the markovchain-class object #' #' @return a matrix with the expect number of visits to each state #' #' @author Ignacio Cordón #' #' @references R. Vélez, T. 
Prieto, Procesos Estocásticos, Librería UNED, 2013 #' #' @examples #' M <- markovchain:::zeros(5) #' M[1,1] <- M[5,5] <- 1 #' M[2,1] <- M[2,3] <- 1/2 #' M[3,2] <- M[3,4] <- 1/2 #' M[4,2] <- M[4,5] <- 1/2 #' #' mc <- new("markovchain", transitionMatrix = M) #' meanNumVisits(mc) #' #' @exportMethod meanNumVisits setGeneric("meanNumVisits", function(object) standardGeneric("meanNumVisits")) setMethod("meanNumVisits", "markovchain", function(object) { .minNumVisitsRcpp(object) }) setMethod( "steadyStates", "markovchain", function(object) { .steadyStatesRcpp(object) } ) #' @exportMethod summary setGeneric("summary") # summary method for markovchain class # lists: closed, transient classes, irreducibility, absorbint, transient states setMethod("summary", signature(object = "markovchain"), function(object){ # list of closed, recurrent and transient classes outs <- .summaryKernelRcpp(object) # display name of the markovchain object cat(object@name," Markov chain that is composed by:", "\n") # number of closed classes check <- length(outs$closedClasses) cat("Closed classes:","\n") # display closed classes if(check == 0) cat("NONE", "\n") else { for(i in 1:check) cat(outs$closedClasses[[i]], "\n") } # number of recurrent classes check <- length(outs$recurrentClasses) cat("Recurrent classes:", "\n") # display recurrent classes if(check == 0) cat("NONE", "\n") else { cat("{") cat(outs$recurrentClasses[[1]], sep = ",") cat("}") if(check > 1) { for(i in 2:check) { cat(",{") cat(outs$recurrentClasses[[i]], sep = ",") cat("}") } } cat("\n") } # number of transient classes check <- length(outs$transientClasses) cat("Transient classes:","\n") # display transient classes if(check == 0) cat("NONE", "\n") else { cat("{") cat(outs$transientClasses[[1]], sep = ",") cat("}") if(check > 1) { for(i in 2:check) { cat(",{") cat(outs$transientClasses[[i]], sep = ",") cat("}") } } cat("\n") } # bool to say about irreducibility of markovchain irreducibility <- is.irreducible(object) 
if(irreducibility) cat("The Markov chain is irreducible", "\n") else cat("The Markov chain is not irreducible", "\n") # display absorbing states check <- absorbingStates(object) if(length(check) == 0) check <- "NONE" cat("The absorbing states are:", check ) cat("\n") # return outs # useful when user will assign the value returned invisible(outs) } ) markovchain/R/ctmcClassesAndMethods.R0000644000176200001440000003314715137702633017332 0ustar liggesusers#' @title Continuous time Markov Chains class #' @name ctmc-class #' @aliases dim,ctmc-method initialize,ctmc_method states,ctmc-method #' steadyStates,ctmc-method plot,ctmc,missing-method #' @description The S4 class that describes \code{ctmc} (continuous #' time Markov chain) objects. #' #' @param states Name of the states. Must be the same of #' \code{colnames} and \code{rownames} of the generator matrix #' @param byrow TRUE or FALSE. Indicates whether the given matrix is #' stochastic by rows or by columns #' @param generator Square generator matrix #' @param name Optional character name of the Markov chain #' #' @section Methods: #' #' \describe{ #' \item{dim}{\code{signature(x = "ctmc")}: method to get the size} #' \item{initialize}{\code{signature(.Object = "ctmc")}: initialize #' method } #' \item{states}{\code{signature(object = "ctmc")}: states method. } #' \item{steadyStates}{\code{signature(object = "ctmc")}: method to get the #' steady state vector. } #' \item{plot}{\code{signature(x = "ctmc", y = "missing")}: plot method #' for \code{ctmc} objects } #' } #' #' @references #' Introduction to Stochastic Processes with Applications in the Biosciences #' (2013), David F. Anderson, University of Wisconsin at Madison. Sai Bhargav #' Yalamanchi, Giorgio Spedicato #' #' @note #' \enumerate{ #' \item \code{ctmc} classes are written using S4 classes #' \item Validation method is used to assess whether either columns or rows totals to zero. #' Rounding is used up to 5th decimal. 
If state names are not properly defined #' for a generator \code{matrix}, coercing to \code{ctmc} object leads to overriding #' states name with artificial "s1", "s2", ... sequence #' } #' @seealso \code{\link{generatorToTransitionMatrix}},\code{\link{rctmc}} #' #' @examples #' energyStates <- c("sigma", "sigma_star") #' byRow <- TRUE #' gen <- matrix(data = c(-3, 3, #' 1, -1), nrow = 2, #' byrow = byRow, dimnames = list(energyStates, energyStates)) #' molecularCTMC <- new("ctmc", states = energyStates, #' byrow = byRow, generator = gen, #' name = "Molecular Transition Model") #' steadyStates(molecularCTMC) #' \dontrun{plot(molecularCTMC)} #' #' @keywords classes #' #' @export setClass("ctmc", representation(states = "character", byrow = "logical", generator = "matrix", name = "character"), prototype(states = c("a", "b"), byrow = TRUE, generator = matrix(data = c(-1, 1, 1, -1), byrow = TRUE, nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), name = "Unnamed CTMC") ) setMethod("initialize", signature(.Object = "ctmc"), function (.Object, states, byrow, generator,name,...) { # put the standard markovchain if(missing(generator)) generator=matrix(data=c(-1, 1, 1, -1), #create a naive matrix nrow=2, byrow=TRUE, dimnames=list(c("a", "b"), c("a", "b")) ) # check names of transition matrix if(all(is.null(rownames(generator)), is.null(colnames(generator)))==TRUE) { #if all names are missing it initializes them to "1", "2",... 
if(missing(states)) { nr=nrow(generator) stateNames<-as.character(seq(1:nr)) } else {stateNames=states} rownames(generator)=stateNames colnames(generator)=stateNames } else if(is.null(rownames(generator))) { #fix when rownames null rownames(generator)=colnames(generator) } else if(is.null(colnames(generator))) { #fix when colnames null colnames(generator)=rownames(generator) } else if(!setequal(rownames(generator),colnames(generator))) colnames(generator)=rownames(generator) #fix when different if(missing(states)) states=rownames(generator) #assign if(missing(byrow)) byrow=TRUE #set byrow as true by default if(missing(name)) name="Unnamed Markov chain" #generic name to the object callNextMethod(.Object, states = states, byrow = byrow, generator=generator,name=name,...) } ) #returns states of the ctmc setMethod("states", "ctmc", function(object) { out <- object@states return(out) } ) #returns states of the ctmc setMethod("dim", "ctmc", function(x) { out <- nrow(x@generator) return(out) } ) setValidity("ctmc", function(object) { check<-NULL # performs a set of check whose results are saved in check if (.isGenRcpp(object@generator)==FALSE) check <- "Error! Not a generator matrix" if (object@byrow==TRUE) { if(any(round(rowSums(object@generator),5)!=0)) check <- "Error! Row sums not equal to zero" } else { if(any(round(colSums(object@generator),5)!=0)) check <- "Error! Col sums not equal to zero" } if (nrow(object@generator)!=ncol(object@generator)) check <- "Error! Not squared matrix" #check if squalre matrix if (!setequal(colnames(object@generator),object@states)) check <- "Error! Colnames <> states" #checks if if (!setequal(rownames(object@generator),object@states)) check <- "Error! 
Rownames <> states" if ( is.null(check) ) return(TRUE) else return(check) } ) .ctmcEigen<-function(matr, transpose=TRUE) { # Function to extract eigenvalues, core of get steady states # # Args: # matr: the matrix to extract # transpose: boolean indicating whether the matrx shall be transpose # # Results: # a matrix / vector if (transpose) tMatr <- t(matr) else tMatr <- matr #trasposing eigenResults <- eigen(x=tMatr,symmetric=FALSE) #perform the eigenvalue extraction onesIndex <- which(round(eigenResults$values,3)==1) #takes the one eigenvalue #do the following: 1:get eigenvectors whose eigenvalues==1 #2: normalize if (length(onesIndex)==0) { warning("No eigenvalue = 1 found - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } if(length(onesIndex) > 1){ warning("Eigenvalue = 1 multiplicity > 1! - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } if (transpose==TRUE) { eigenTake <- as.matrix(t(eigenResults$vectors[,onesIndex])) if(rowSums(Im(eigenTake)) != 0){ warning("Eigenvector corresponding to largest eigenvalue has a non-zero imaginary part - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } out <- eigenTake } else { eigenTake <- as.matrix(eigenResults$vectors[,onesIndex]) if(colSums(Im(eigenTake)) != 0){ warning("Eigenvector corresponding to largest eigenvalue has a non-zero imaginary part - the embedded Markov Chain must be irreducible, recurrent") return(NULL) } out <- eigenTake } return(out) } setMethod("steadyStates", "ctmc", function(object) { transposeYN <- FALSE if(object@byrow==TRUE) transposeYN <- TRUE transMatr <- generatorToTransitionMatrix(object@generator, byrow = object@byrow) out<-.ctmcEigen(matr=transMatr, transpose=transposeYN) if(is.null(out)) { warning("Warning! 
No steady state") return(NULL) } if(transposeYN==TRUE) { colnames(out) <- object@states } else { rownames(out) <- object@states } out <- - out / diag(object@generator) if(transposeYN==TRUE){ out <- out / rowSums(out) } else{ out <- out / colSums(out) } return(out) } ) # internal function for plotting ctmc object using igraph .getNetctmc <- function(object, round = FALSE) { # function to get the graph adjacency object to plot and export to igraph # # Args: # object: a ctmc object # round: boolean to round # # Returns: # # a graph adjacency if (object@byrow == FALSE) { object <- t(object) } #gets the generator matrix matr <- object@generator * 100 if(round == TRUE) { matr <- round(matr, 2) } net <- graph.adjacency(adjmatrix = matr, weighted = TRUE, mode = "directed") return(net) } setMethod("plot",signature(x="ctmc",y="missing"), function(x,y,package = "igraph",...){ switch(package, diagram = { if (requireNamespace("diagram", quietly = TRUE)) { .plotdiagram(object = x, ...) } else { netMc <- .getNetctmc(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, DiagrammeR = { if (requireNamespace("DiagrammeR", quietly = TRUE)) { .plotDiagrammeR(object = x, ...) } else { netMc <- .getNetctmc(object = x, round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) } }, { netMc <- .getNetctmc(object = x,round = TRUE) edgeLabel <- round(E(netMc)$weight / 100, 2) plot.igraph(x = netMc, edge.label = edgeLabel, ...) 
}) } ) #' An S4 class for representing Imprecise Continuous Time Markovchains #' #' @slot states a vector of states present in the ICTMC model #' @slot Q matrix representing the generator demonstrated in the form of variables #' @slot range a matrix that stores values of range of variables #' @slot name name given to ICTMC #' ictmc <- setClass("ictmc", slots = list(states = "character", Q = "matrix", range = "matrix", name = "character") ) setMethod("initialize", signature(.Object = "ictmc"), function (.Object, states, Q, range, name, ...) { if(missing(Q)) Q=matrix(data=c(-1, 1, 1, -1), #create a naive matrix nrow=2, byrow=TRUE, dimnames=list(c("n", "y"), c("n", "y")) ) if(missing(range)) range = matrix(c(1/52, 3/52, 1/2, 2), nrow = 2, byrow = 2) #if all names are missing it initializes them to "1", "2",... if(all(is.null(rownames(Q)), is.null(colnames(Q)))==TRUE) { if(missing(states)) { nr=nrow(Q) stateNames<-as.character(seq(1:nr)) } else {stateNames=states} rownames(Q)=stateNames colnames(Q)=stateNames } else if(is.null(rownames(Q))) { #fix when rownames null rownames(Q)=colnames(Q) } else if(is.null(colnames(Q))) { #fix when colnames null colnames(Q)=rownames(Q) } else if(!setequal(rownames(Q),colnames(Q))) colnames(Q)=rownames(Q) #fix when different if(missing(states)) states=rownames(Q) #assign if(missing(name)) name="Unnamed imprecise CTMC" #generic name to the object callNextMethod(.Object, states = states, Q = Q, range=range,name=name,...) } ) setValidity("ictmc", function(object) { check<-NULL # performs a set of check whose results are saved in check if (.isGenRcpp(object@Q)==FALSE) check <- "Error! Not a generator matrix" if(any(round(rowSums(object@Q),5)!=0)) check <- "Error! Row sums not equal to zero" if ( nrow(object@Q) != ncol(object@Q )) check <- "Error! Not squared matrix" #check if square matrix if ( !setequal(colnames(object@Q),object@states )) check <- "Error! 
Colnames <> states" if ( !setequal(rownames(object@Q),object@states )) check <- "Error! Rownames <> states" if(nrow(object@range) != nrow(object@Q) && ncol(object@range) != 2) check <- "Error! dimension of range matrix not correct." for(i in 1:nrow(object@Q)){ if( object@range[i,1] > object@range[i,2] ){ check <- "Error, improper values set in range matrix." } } if(min(object@range) < 0){ check <- "Error, values in the range matrix should be greater than zero." } if ( is.null(check) ) return(TRUE) else return(check) } ) markovchain/R/sysdata.rda0000644000176200001440000000642415137702633015132 0ustar liggesusersBZh91AY&SYdx,{ _P'Xènښ5 &hzS!zSC=M4~2zj=#(<)zF4ڍ7FOS&QM=M=FOPhѵ=4 ALORx<h d5(R4nS_ ]׆ ".XlTi;JW=\XX,aQ+h=! B2m7yAƋqw^12|Nr/7# cCl0D8a͇?' `A a"ED${EYiD6s1ZXWbaȲKy讅 b t 䑀ɿHBGA2HE HDbbS]{"`j*BpdH,UakB8yP(hUV:iEuܕ"&`wkuPQCEB'>kcM?7ֺ$ۇ;s \Hx |@!"ToNGHjP 7 c60 (\hlP0Ao @6~զ[[Ht$ "+p!AIP; ǀ A{O)'D ~L¨@ADBHŠEC 0'\hQ !`"%Ԥ 1 3Hbx|-A$RK>a`kwrnqѯ۫q{]y& "BVn@ȊF2΀h7Ӄ46.EQhY"N6Ҵΰ6]4̧AxR@S"䊠 ,2M #n6T, (6LdyPx O+d|U-G_L032b}ɢԝQ9=)n_Fk(Mx^0zì#*M\^L&@އ| ,A]vhCmn+uXl"f -GG+j$8 ˘Y!8aJIq53mzOSkTC{a߉ՑdK:KdF1D?$ĝ'6  9O 6;Yz].0ŒHj\xaHQ:,ZM64VjVw0B$ri ^2L̥3Su>X^SHф*EdVS%kGA)aQ "Xm'IrZZAB[OǣwTb bHA8)E3ٲ*qbOXm=E0Ir>=dAaoYU᷑А(_j53w0`g -؂_1T[T1W"F c-P3q ^_0 +QRq^Z,җ ޿A'] Ȝo=ԇc$ZG:r'ug8 ' rI6)k;򄫍\$'p?A2vrrŮH%I6Oq> wY3 0 if error in inversion 1 otherwise out<-tryCatch(steadyStates(as(myStochasticMatrix, "markovchain")), error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } checkSparseMInversion<-function(i,num){ #simulate the priors myStochasticMatrix<-createSparseMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch(steadyStates(as(myStochasticMatrix, "markovchain")), error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } #performing the simulation successRate<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) 
clusterExport(cl, "checkInversion") clusterExport(cl, "createMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkInversion,num=dimension) successRate[k]<-mean(simulations) k=k+1 } stopCluster(cl) #summarising first test: #plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate") #abline(h=0.5,col="red") #text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) #dev.off() dimensions2Test = 2^seq(from=3, to=8) successRate<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkSparseMInversion") clusterExport(cl, "createSparseMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkSparseMInversion,num=dimension) successRate[k]<-mean(simulations) k=k+1 } stopCluster(cl) # # # plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate ??? 
sparse matrices") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) #second test: simulating exponentiation checkExponentiation<-function(i,num){ #simulate the priors myStochasticMatrix<-createMatrix(matr_size = num) #this code returns FALSE -> 0 if error in inversion 1 otherwise out<-tryCatch((as(myStochasticMatrix, "markovchain"))^2, error=function(c) return(FALSE) ) if(class(out)=="logical") return(0) else return(1) } #performing the simulation successRate2<-numeric(length(dimensions2Test)) #using parallel backend no_cores <- detectCores() - 1 cl <- makeCluster(no_cores) clusterExport(cl, "checkExponentiation") clusterExport(cl, "createMatrix") clusterEvalQ(cl, library(markovchain)) clusterEvalQ(cl, library(MCMCpack)) k=1 for (dimension in dimensions2Test){ simulations<-parSapply(cl=cl,1:numSim,FUN=checkExponentiation,num=dimension) successRate2[k]<-mean(simulations) k=k+1 } stopCluster(cl) #summarising first test: #par(mfrow=c(1,2)) # plot(x=dimensions2Test,y=successRate,type="l",xlab="matrix size",ylab="success rate",main="Steady state computation success rate") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate,labels=round(successRate,digits=2),col="darkred",cex=0.7) # plot(x=dimensions2Test,y=successRate2,type="l",xlab="matrix sixe",ylab="success rate",main="Exponentiation computation success rate") # abline(h=0.5,col="red") # text(x=dimensions2Test,y=successRate2,labels=round(successRate2,digits=2),col="darkred",cex=0.7) markovchain/demo/00Index0000644000176200001440000000074515137702633014647 0ustar liggesusersquickStart Quick start for markovchain package examples Various examples of Markov Chain bard Structural analysis of Markov chains from Bard PPT extractMatrices Given a matrix, returns the canonical form, fundamental matrix etc... 
mathematica Mathematica Markov Chains reliability Simulate reliability in inverting a markovchain quasiBayesian Quasi Bayesian estimator function computationTime Computation time by size of the character sequence markovchain/demo/quickStart.R0000644000176200001440000000305215137702633015764 0ustar liggesusers# TODO: Add comment # # Author: Giorgio Spedicato ############################################################################### require(markovchain) #creates some markovchain objects statesNames=c("a","b") mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames) )) mcB<-new("markovchain", states=c("a","b","c"), transitionMatrix= matrix(c(0.2,0.5,0.3, 0,1,0, 0.1,0.8,0.1),nrow=3, byrow=TRUE)) mcC<-new("markovchain", states=c("a","b","c","d"), transitionMatrix=matrix(c(0.25,0.75,0,0,0.4, 0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) ) mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #apply some methods testConversion<-as(mcC, "data.frame") markovD<-t(mcC) steadyStates(mcC) steadyStates(mcC) #perform some fitting sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit<-markovchainFit(data=sequence,byrow=FALSE) #canonic form #from https://math.dartmouth.edu/archive/m20x06/public_html/Lecture14.pdf P<-markovchain::zeros(5) P[1,1]<-P[5,5]<-1 P[2,1]<-P[2,3]<-0.5 P[3,2]<-P[3,4]<-0.5 P[4,3]<-P[4,5]<-0.5 mcP<-as(P,"markovchain") mcPCan<-canonicForm(mcP) # coercing markov chains to sparse matrix forth and back require(Matrix) ciauz<-c(0,.5,.5,1,0,0,.2,0,.8) matrix(ciauz, nrow=3, byrow=TRUE) sparse<-as(matrix(ciauz, nrow=3, byrow=TRUE),"sparseMatrix") mc<-as(sparse,"markovchain") sparse2<-as(mc,"sparseMatrix") markovchain/demo/extractMatrices.R0000644000176200001440000000136015137702633016774 0ustar liggesusers#function to extract matrices require(markovchain) extractMatrices <- function(mcObj) { mcObj <- 
canonicalForm(object = mcObj) #get the indices of transient and absorbing transIdx <- which(states(mcObj) %in% transientStates(mcObj)) absIdx <- which(states(mcObj) %in% absorbingStates(mcObj)) #get Q, R and I Q <- as.matrix(mcObj@transitionMatrix[transIdx,transIdx]) R <- as.matrix(mcObj@transitionMatrix[transIdx,absIdx]) I <- as.matrix(mcObj@transitionMatrix[absIdx, absIdx]) #get fundamental matrix N <- solve(eye(size(Q)) - Q) #final absorbion probabilities NR <- N %*% R #return out <- list( canonicalForm = mcObj, Q = Q, R = R, I = I, N=N, NR=NR ) return(out) }markovchain/demo/examples.R0000644000176200001440000000121415137702633015446 0ustar liggesusers#up and down markov chains mcUpDown<-new("markovchain", states=c("up","down"), transitionMatrix=matrix(c(0,1,1,0),nrow=2, byrow=TRUE),name="UpDown") #gamblers ruin gamblerRuinMarkovChain<-function(moneyMax, prob=0.5) { matr<-markovchain::zeros(moneyMax+1) states<-as.character(seq(from=0, to=moneyMax, by=1)) rownames(matr)=states; colnames(matr)=states matr[1,1]=1;matr[moneyMax+1,moneyMax+1]=1 for(i in 2:moneyMax) { matr[i,i-1]=1-prob;matr[i,i+1]=prob } out<-new("markovchain", transitionMatrix=matr, name=paste("Gambler ruin",moneyMax,"dim",sep=" ") ) return(out) }markovchain/vignettes/0000755000176200001440000000000015137710472014573 5ustar liggesusersmarkovchain/vignettes/template.tex0000644000176200001440000000363015137702633017132 0ustar liggesusers\documentclass[ $for(classoption)$ $classoption$$sep$, $endfor$ ]{$documentclass$} \usepackage[utf8]{inputenc} \providecommand{\tightlist}{% \setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}} \author{ $for(author)$$author.name$\\$author.affiliation$$sep$ \And $endfor$ } \title{$title.formatted$} \Plainauthor{$for(author)$$author.name$$sep$, $endfor$} $if(title.plain)$ \Plaintitle{$title.plain$} $endif$ $if(title.short)$ \Shorttitle{$title.short$} $endif$ \Abstract{ $abstract$ } $if(keywords.formatted)$ \Keywords{$for(keywords.formatted)$$keywords.formatted$$sep$, 
$endfor$} $endif$ $if(keywords.plain)$ \Plainkeywords{$for(keywords.plain)$$keywords.plain$$sep$, $endfor$} $endif$ $if(highlighting-macros)$ $highlighting-macros$ $endif$ %% publication information %% \Volume{50} %% \Issue{9} %% \Month{June} %% \Year{2012} %% \Submitdate{$submitdate$} %% \Acceptdate{2012-06-04} \Address{ $for(author)$ $if(author.address)$ $author.name$\\ $author.affiliation$\\ $author.address$\\ $if(author.email)$E-mail: $author.email$\\$endif$ $if(author.url)$URL: $author.url$\\~\\$endif$ $endif$ $endfor$ } $if(csl-refs)$ \newlength{\cslhangindent} \setlength{\cslhangindent}{1.5em} \newenvironment{cslreferences}% {$if(csl-hanging-indent)$\setlength{\parindent}{0pt}% \everypar{\setlength{\hangindent}{\cslhangindent}}\ignorespaces$endif$}% {\par} $endif$ % Pandoc header $for(header-includes)$ $header-includes$ $endfor$ $preamble$ \begin{document} $body$ $if(natbib)$ $if(bibliography)$ $if(biblio-title)$ $if(book-class)$ \renewcommand\bibname{$biblio-title$} $else$ \renewcommand\refname{$biblio-title$} $endif$ $endif$ \bibliography{$for(bibliography)$$bibliography$$sep$,$endfor$} $endif$ $endif$ $if(biblatex)$ \printbibliography$if(biblio-title)$[title=$biblio-title$]$endif$ $endif$ \end{document} markovchain/vignettes/gsoc_2017_additions.Rmd0000644000176200001440000005244015137702633020706 0ustar liggesusers--- title: "Google Summer of Code 2017 Additions" author: "Vandit Jain" date: "August 2017" output: rmarkdown::html_vignette bibliography: markovchainBiblio.bib vignette: > %\VignetteIndexEntry{Google Summer of Code 2017 Additions} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, comment = "#>") ``` ```{r setup_2, include=FALSE} require(markovchain) ``` # Expected Hitting Time using CTMC The package provides `ExpectedTime` function to calculate average hitting time from one state to another. 
Let the final state be j, then for every state $i \in I$, where $I$ is the set of all possible states and holding time $q_{i} > 0$ for every $i \neq j$. Assuming the conditions to be true, expected hitting time is equal to minimal non-negative solution vector $p$ to the system of linear equations [@NorrisBook]: \begin{equation} \begin{array}{lcr} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{k} = 1 & k \neq j \end{array} \label{eq:EHT} \end{equation} For example, consider the continuous time markovchain which is as follows: ``` {r,message = FALSE} library(markovchain) states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ``` The generator matrix of the ctmc is: \[ M = \left(\begin{array}{cccc} -1 & 1/2 & 1/2 & 0\\ 1/4 & -1/2 & 1/4 & 1/6\\ 1/6 & 0 & -1/3 & 1/6\\ 0 & 0 & 0 & 0 \end{array}\right) \] Now if we have to calculate expected hitting time the process will take to hit state $d$ if we start from $a$, we apply the $ExpectedTime$ function. $ExpectedTime$ function takes four inputs namely a $ctmc$ class object, initial state $i$, the final state $j$ that we have to calculate expected hitting time and a logical parameter whether to use RCpp implementation. By default, the function uses RCpp as it is faster and takes lesser time. ``` {r} ExpectedTime(ctmc,1,4) ``` We find that the expected hitting time for process to be hit state $d$ is 7 units in this case. # Calculating Probability at time T using ctmc The package provides a function `probabilityatT` to calculate probability of every state according to given `ctmc` object. 
The Kolmogorov's backward equation gives us a relation between transition matrix at any time t with the generator matrix[@dobrow2016introduction]: \begin{equation} P'(t) = QP(t) \end{equation} Here we use the solution of this differential equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. In this equation, $P(t)$ is the transition function at time t. The value $P(t)[i][j]$ at time $P(t)$ describes the conditional probability of the state at time $t$ to be equal to j if it was equal to i at time $t=0$. It takes care of the case when `ctmc` object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix $P(t)$. Also to mention is that the function is also implemented using RCpp and can be used used to lessen the time of computation. It is used by default. Next, We consider both examples where initial state is given and case where initial state is not given. In the first case, the function takes two inputs, first of them is an object of the S4 class 'ctmc' and second is the final time $t$. ``` {r} probabilityatT(ctmc,1) ``` Here we get an output in the form of a transition matrix. If we take the second case i.e. considering some initial input: ``` {r} probabilityatT(ctmc,1,1) ``` In this case we get the probabilities corresponding to every state. this also includes probability that the process hits the same state $a$ after time $t=1$. # Plotting generator matrix of continuous-time markovchains The package provides a `plot` function for plotting a generator matrix $Q$ in the form of a directed graph where every possible state is assigned a node. Edges connecting these nodes are weighted. Weight of the edge going from a state $i$ to state $j$ is equal to the value $Q_{ij}$. This gives a picture of the generator matrix. For example, we build a ctmc-class object to plot it. 
``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` Now if we plot this function we get the following graph: ``` {r} plot(molecularCTMC) ``` The figure shown is built using the $igraph$ package. The package also provides options of plotting graph using $diagram$ and $DiagrameR$ package. Plot using these packages can be built using these commands: ``` {r} if(requireNamespace(package='ctmcd', quietly = TRUE)) { plot(molecularCTMC,package = "diagram") } else { print("diagram package unavailable") } ``` Similarly, one can easily replace $diagram$ package with $DiagrammeR$. # Imprecise Continuous-Time Markov chains Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous time markovchains take in consideration a number of assumptions which may not be realistic for the domain of application; in particular; the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMC into consideration. More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models. 
## Types of ICTMCs For any non-empty bounded set of rate matrices $L$, and any non-empty set $M$ of probability mass functions on $X$, we define the following three sets of stochastic processes that are jointly consistent with $L$ and $M$: * $P^{W}_{L,M}$ is the consistent set of all well-behaved stochastic processes; * $P^{WM}_{L,M}$ is the consistent set of all well-behaved Markov chains; * $P^{WHM}_{L,M}$ is the consistent set of all well-behaved homogeneous Markov chains[@ictmcpaper]. From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs. ## Lower Transition Rate Operators for ICTMCs A map $Q_{l}$ from $L(X)$ to $L(X)$ is called a lower transition rate operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]: 1. $[Q_{l}m](x) = 0$ 2. $[Q_{l}I](x) \geq 0 \forall y \in X$ such that $x \neq y$ 3. $[Q_{l}(f + g)](x)\geq [Q_{l}f](x) + [Q_{l}g](x)$ 4. $[Q_{l}(l f)](x) = \lambda Q_{l}f[(x)]$ ## Lower Transition Operators A map $T_{l}$ from $L (X )$ to $L (X )$ is called a lower transition operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]: 1. $[T_{l} f](x) \geq min(f(y) : y \in L)$ 2. $[T_{l}(f +g)](x) \geq [T_{l} f](x)+[T_{l}g](x)$ 3. $[T_{l}(\lambda f)](x) = l [T_{l} f](x)$ ## ImpreciseprobabilityatT function Now I would like to come onto the practical purpose of using ICTMC classes. ICTMC classes in these package are defined to represent a generator that is defined in such a way that every row of the generator corresponding to every state in the process is governed by a separate variable. 
As defined earlier, an imprecise continuous time markovchain is a set of many precise CTMCs. Hence this representation of set of precise CTMCs can be used to calulate transition probability at some time in future. This can be seen as an analogy with `probabilityatT` function. It is used to calculate the transition function at some later time t using generatoe matrix. For every generator matrix, we have a corresponding transition function. Similarly, for every Lower Transition rate operator of an ICTMC, we have a corresponding lower transition operator denoted by $L_{t}^{s}$. Here $t$ is the initial time and $s$ is the final time. Now we mention a proposition[@ictmcpaper] which states that: Let $Q_{l}$ be a lower transition rate operator, choose any time $t$ and $s$ both greater than 0 such that $t \leq s$, and let $L_{t}^{s}$ be the lower transition operator corresponding to $Q_{l}$. Then for any $f \in L(X)$ and $\epsilon \in R_{>0}$, if we choose any $n \in N$ such that: \[n \geq max((s-t)*||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\] with $||f||_{v}$ := max $f$ - min $f$, we are guaranteed that[@ictmcpaper] \[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \] with $\Delta := \frac{s-t}{n}$ Simple put this equation tells us that, using $Q_{l}g$ for all $g \in L(X)$ then we can also approximate the quantity $L_{t}^{s}$ to arbitrary precision, for any given $f \in L(X)$. To explain this approximate calculation, I would take a detailed example of a process containing two states healthy and sick, hence $X = (healthy,sick)$. If we represent in form of an ICTMC, we get: \[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) \] for some $a,b \in R_{\geq 0}$. The parameter $a$ here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time $t$, the probability that he or she will be sick at time $t +\Delta$, for small $\Delta$, is very close to $\Delta a$. 
More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after $\frac{1}{a}$ weeks. The parameter $b$ is the rate at which a sick person becomes healthy again, and has a similar interpretation. Now to completely represent the ICTMC we take an example and write the generator as: \[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}],b \in [\frac{1}{2},2] \] Now suppose we know the initial state of the patient to be sick, hence this is represented in the form of a function by: \[ I_{s} = \left(\begin{matrix} 0 \\ 1 \end{matrix}\right) \] We observe that the $||I_{s}|| = 1$. Now to use the proposition mentioned above, we use the definition to calculate the lower transition operator $Q_{l}$ Next we calculate the norm of the lower transition rate operator and use it in the preposition. Also we take value of $\epsilon$ to be 0.001. Using the preposition we can come up to an algorithm for calculating the probability at any time $s$ given state at initial time $t$ and a ICTMC generator[@ictmcpaper]. The algorithm is as follows: **Input**: A lower transition rate operator $Q$, two time points $t,s$ such that $t \leq s$, a function $f \in L(X )$ and a maximum numerical error $\epsilon \in R_{>0}$. **Algorithm**: 1. $n = max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)$ 2. $\Delta = \frac{s-t}{n}$ 3. $g_{0} = I_{s}$ 4. for $i \in (1,.....,n)$ do $g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}$ 5. end for 6. return $g_{n}$ **Output**: The conditional probability vector after time $t$ with error $\epsilon$. Hence, after applying the algorithm on above example we get the following result: $ g_{n} = 0.0083$ if final state is $healthy$ and $g_{n} = 0.141$ if the final state is $sick$. The probability calculated is with an error equal to $\epsilon$ i.e. $0.001$. Now we run the algorithm on the example through R code. 
``` {r} states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) ``` The probabilities we get are with an error of $10^{-3}$ # Continuous time markovchain generator using frequency Matrix The package provides `freq2Generator` function. It takes in a matrix representing relative frequency values along with time taken to provide a continuous time markovchain generator matrix. Here, frequency matrix is a 2-D matrix of dimensions equal to relative number of possible states describing the number of transitions from a state $i$ to $j$ in time $t$, which is another parameter to be provided to the function. The function also allows to chose among three methods for calculation of the generator matrix [@freqArticle]. It requires the [@pkg:ctmcd] package. Three methods are as follows: 1. Quasi Optimization - "QO" 2. Diagonal Adjustment - "DA" 3. Weighted Adjustment - "WA" See reference for details about the methods. Here is an example matrix on which `freq2Generator` function is run: ``` {r} if(requireNamespace(package='ctmcd', quietly = TRUE)) { sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) } else { print('ctmcd unavailable') } ``` # Committor of a markovchain Consider set of states A,B comprising of states from a markovchain with transition matrix P. The committor vector of a markovchain with respect to sets A and B gives the probability that the process will hit a state from set A before any state from set B. 
Committor vector u can be calculated by solving the following system of linear equations[@committorlink]: $$ \begin{array}{l} Lu(x) = 0, x \notin A \cup B \\ u(x) = 1, x \in A \\ u(x) = 0, x \in B \end{array} $$ where $L = P -I$. Now we apply the method to an example: ``` {r eval=FALSE} transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") committorAB(object,c(5),c(3)) ``` Here we get probability that the process will hit state "e" before state "c" given different initial states. # First Passage probability for set of states Currently computation of the first passage time for individual states has been implemented in the package. `firstPassageMultiple` function provides a method to get first passage probability for given provided set of states. Consider this example markovchain object: ``` {r} statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) ``` Now we apply `firstPassageMultiple` function to calculate first passage probabilities for set of states $"b", "c"$ when initial state is $"a"$. ``` {r} firstPassageMultiple(testmarkov,"a",c("b","c"),4) ``` This shows us the probability that the process will hit any of the state from the set after n number of steps for instance, as shown, the probability of the process to hit any of the states among $"b", "c"$ after $2$ steps is $0.6000$. # Joint PDF of number of visits to the various states of a markovchain The package provides a function `noofVisitsDist` that returns the PDF of the number of visits to the various states of the discrete time markovchain during the first N steps, given initial state of the process. 
We will take an example to see how to use the function on a `markovchain-class` object: ``` {r} transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") ``` The output clearly shows the probabilities related to various states of the process. # Expected Rewards for a markovchain The package provides a function `expectedRewards` that returns a vector of expected rewards for different initial states. The user provides reward values, a vector $r$ of size equal to number of states having a value corresponding to every state. Given a transition matrix $[P]$, we get the vector of expected rewards $v$ after $n$ transitions according to the equation as follows[@GallagerBook]: $v[n] = r + [P]*v[n-1]$ Applying this equation on a markovchain-class object ``` {r} transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) ``` ## Expected Rewards for a set of states in a markovchain process The package provides a function `expectedRewardsBeforeHittingA` that returns the value of expected first passage rewards $E$ given rewards corresponding to every state, an initial state. This means the function returns expected reward for given initial state $s_{0}$, number of transitions $n$ and for a set of states $A$ with a constraint such that the process does not hit any of the states that belong to state $A$. $S$ is the set of all possible states. The function uses an equation which is as follows: $$E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}$$ here $1_{s_{0}} = [0,0,...0,1,0,...,0,0,0]$, 1 being on $s_{0}$ position and $R_{S-A}$ being the rewards vector for $S-A$ state. # Checking Irreducibly of a CTMC The package provides a function `is.CTMCirreducible` that returns a Boolean value stating whether the ctmc object is irreducible. 
We know that a continuous time markovchain is irreducible if and only if its embedded chain is irreducible[@Sigman]. We demonstrate an example running the function: ``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) ``` # Simulation of Higher Order Multivariate Markovchains The package provides `predictHommc` function. This function provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the jth sequence at time $r+1$ depends on the state probability distribution of all the sequences at n previous mon=ments of time i.e. $t = r$ to $t = r-n+1$ . Hence the proposed model takes the form mathematically as:[@ching2008higher] $$ X_{r+1}^{j} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,....s, \ \ r = n-1,n,... $$ with initals $X_{0}^{(k)},X_{1}^{(k)},......,X_{n-1}^{(k)} \ (k = 1,2,...s)$. Here, $\lambda_{jk}^{(k)}, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n \ \ \ and \ \ \ \sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)} = 1}, \ \ \ j = 1,2,....s.$ Now we run an example on sample hommc object for simulating next 3 steps using `predictHommc` function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states. 
``` {r} if (requireNamespace("Rsolnp", quietly = TRUE)) { statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) } else { print("Rsolnp unavailable") } ``` # Check Time Reversibility of Continuous-time markovchains A Continuous-time markovchain with generator $Q$ and stationary distribution $\pi$ is said to be time reversible if:[@dobrow2016introduction] $$ \pi_{i}q_{ij} = \pi_{j}q_{ji} $$ Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states i and j, the long-term forward transition rate from i to j is equal to the long-term backward rate from j to i. The package provides `is.TimeReversible` function to check if a `ctmc` object is time-reversible. We follow with an example run on a `ctmc` object. 
``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) ``` # References markovchain/vignettes/markovchainBiblio.bib0000644000176200001440000004701715137710106020677 0ustar liggesusers% Encoding: UTF-8 % Formatted with emacs using C-c C-q in BibTeX mode @book{dobrow2016introduction, title = {Introduction to Stochastic Processes with R}, author = {Dobrow, Robert P}, year = {2016}, publisher = {John Wiley \& Sons} } @manual{pkg:ctmcd, title = {ctmcd: Estimating the Parameters of a Continuous-Time Markov Chain from Discrete-Time Data}, author = {Marius Pfeuffer}, year = {2017}, note = {R package version 1.1}, url = {https://CRAN.R-project.org/package=ctmcd} } @article{Hu2002, author = {Hu, Yen Ting and Kiesel, Rudiger and Perraudin, William}, doi = {10.1016/S0378-4266(02)00268-6}, isbn = {0378-4266}, issn = {03784266}, journal = {Journal of Banking and Finance}, keywords = {Credit rating,Credit risk,Ordered probit,Sovereign default}, number = {7}, pages = {1383--1406}, title = {{The estimation of transition matrices for sovereign credit ratings}}, volume = {26}, year = {2002} } @book{de2016assicurazioni, title = {Assicurazioni sulla salute: caratteristiche, modelli attuariali e basi tecniche}, author = {{De Angelis, Paolo} and {Di Falco, L.}}, isbn = {9788815260840}, series = {Il Mulino}, url = {https://books.google.it/books?id=D56bjgEACAAJ}, year = {2016}, publisher = {Il Mulino} } @mastersthesis{skuriat2005statistical, author = "Skuriat-Olechnowska, Monika", title = "Statistical inference and hypothesis testing for Markov chains with Interval Censoring", school = "TUDelft", year = "2005" } @article{kullback1962tests, title = {Tests for Contingency Tables and Marltov Chains}, author = {Kullback, S and 
Kupperman, Michael and Ku, HH}, journal = {Technometrics}, volume = {4}, number = {4}, pages = {573--608}, year = {1962}, publisher = {Taylor \& Francis} } @article{israel2001finding, title = {Finding generators for Markov chains via empirical transition matrices, with applications to credit ratings}, author = {Israel, Robert B and Rosenthal, Jeffrey S and Wei, Jason Z}, journal = {Mathematical finance}, volume = {11}, number = {2}, pages = {245--265}, year = {2001}, publisher = {Wiley Online Library} } @manual{pkg:RcppParallel, title = {RcppParallel: Parallel Programming Tools for 'Rcpp'}, author = {JJ Allaire and Romain Francois and Kevin Ushey and Gregory Vandenbrouck and Marcus Geelnard and {Intel}}, year = {2016}, note = {R package version 4.3.19}, url = {https://rcppcore.github.io/RcppParallel/} } @manual{pkg:MultinomialCI, title = {MultinomialCI: Simultaneous confidence intervals for multinomial proportions according to the method by Sison and Glaz}, author = {Pablo J. Villacorta}, year = {2012}, note = {R package version 1.0}, url = {https://CRAN.R-project.org/package=MultinomialCI} } @article{sison1995simultaneous, title = {Simultaneous confidence intervals and sample size determination for multinomial proportions}, author = {Sison, Cristina P and Glaz, Joseph}, journal = {Journal of the American Statistical Association}, volume = {90}, number = {429}, pages = {366--369}, year = {1995}, publisher = {Taylor \& Francis} } @article{konstantopoulos2009markov, title = {Markov Chains and Random Walks}, author = {Konstantopoulos, Takis}, journal = {Lecture notes}, year = {2009} } @article{craigSendi, author = {{B. A. Craig} and {A. A. Sendi}}, title = {Estimation of the Transition Matrix of a Discrete-Time Markov Chain}, journal = {Health Economics}, year = {2002}, volume = {11}, pages = {33--42} } @misc{bardPpt, author = {J. F. 
Bard}, title = {Lecture 12.5 - Additional Issues Concerning Discrete-Time Markov Chains}, month = {April}, year = {2000}, url ={https://www.me.utexas.edu/~jensen%20/ORMM/instruction/powerpoint/or_models_09/12.5_dtmc2.ppt} } @incollection{bremaud1999discrete, author = {Br{\'e}maud, Pierre}, title = {Discrete-Time Markov Models}, booktitle = {Markov Chains}, publisher = {Springer}, year = {1999}, pages = {53--93} } @book{chambers, title = {Software for Data Analysis: Programming with \proglang{R}}, publisher = {Springer-Verlag}, year = {2008}, author = {Chambers, J.M.}, series = {Statistics and computing}, isbn = {9780387759357}, lccn = {2008922937} } @book{ching2006markov, title = {Markov Chains: Models, Algorithms and Applications}, publisher = {Springer-Verlag}, year = {2006}, author = {Ching, W.K. and Ng, M.K.}, series = {International Series in Operations Research \& Management Science}, isbn = {9780387293356}, lccn = {2005933263} } @article{pkg:igraph, author = {Gabor Csardi and Tamas Nepusz}, title = {The \pkg{igraph} Software Package for Complex Network Research}, journal = {InterJournal}, year = {2006}, volume = {Complex Systems}, pages = {1695}, url = {https://igraph.sourceforge.net/} } @book{denuit2007actuarial, title = {Actuarial modelling of claim counts: Risk classification, credibility and bonus-malus systems}, publisher = {Wiley}, year = {2007}, author = {Denuit, Michel and Mar{\'e}chal, Xavier and Pitrebois, Sandra and Walhin, Jean-Fran{\c{c}}ois} } @book{deshmukh2012multiple, title = {Multiple Decrement Models in Insurance: An Introduction Using \proglang{R}}, publisher = {Springer-Verlag}, year = {2012}, author = {Deshmukh, S.R.}, series = {SpringerLink : B{\"u}cher}, isbn = {9788132206590}, lccn = {2012942476} } @book{RcppR, title = {Seamless \proglang{R} and \proglang{C++} Integration with \pkg{Rcpp}}, publisher = {Springer-Verlag}, year = {2013}, author = {Dirk Eddelbuettel}, address = {New York}, note = {ISBN 978-1-4614-6867-7} } 
@misc{renaldoMatlab, author = {Renaldo Feres}, title = {Notes for Math 450 \proglang{MATLAB} Listings for Markov Chains}, year = {2007}, url = {https://www.math.wustl.edu/~feres/Math450Lect04.pdf} } @manual{mcmcR, title = {mcmc: Markov Chain Monte Carlo}, author = {Charles J. Geyer and Leif T. Johnson}, year = {2013}, note = {\proglang{R} package version 0.9-2}, url = {https://CRAN.R-project.org/package=mcmc} } @incollection{glassHall, author = {Glass, D.V. and Hall, J. R.}, title = {Social Mobility in Great Britain: A Study in Intergenerational Change in Status}, booktitle = {Social Mobility in Great Britain}, publisher = {Routledge and Kegan Paul}, year = {1954} } @manual{expmR, title = {\pkg{expm}: Matrix Exponential}, author = {Vincent Goulet and Christophe Dutang and Martin Maechler and David Firth and Marina Shapira and Michael Stadelmann and {expm-developers@lists.R-forge.R-project.org}}, year = {2013}, note = {R package version 0.99-1}, url = {https://CRAN.R-project.org/package=expm} } @manual{hmmR, title = {\pkg{HMM}: HMM - Hidden Markov Models}, author = {Scientific Software Development - Dr. Lin Himmelmann and {www.linhi.com}}, year = {2010}, note = {\proglang{R} package version 1.0}, url = {https://CRAN.R-project.org/package=HMM} } @book{landOfOz, title = {Introduction to Finite Mathematics}, publisher = {Prentice Hall}, year = {1974}, author = {{J. G. Kemeny} and {J. L.Snell} and {G. L. Thompson}} } @article{msmR, author = {Christopher H. 
Jackson}, title = {Multi-State Models for Panel Data: The \pkg{msm} Package for \proglang{R}}, journal = {Journal of Statistical Software}, year = {2011}, volume = {38}, pages = {1--29}, number = {8}, url = {https://www.jstatsoft.org/v38/i08/} } @misc{manchesterR, author = {Ian Jacob}, month = {May}, year = {2014}, title = {Is R Cost Effective?}, language = {En}, howpublished = {Electronic}, organization = {Manchester Centre for Health Economics}, note = {Presented on Manchester R Meeting} } @TECHREPORT{blandenEtAlii, author = {Jo Blanden, Paul Gregg and Stephen Machin}, title = {Intergenerational Mobility in Europe and North America}, institution = {Center for Economic Performances}, year = {2005}, owner = {Giorgio Copia}, timestamp = {2014.01.01}, url ={https://cep.lse.ac.uk/about/news/IntergenerationalMobility.pdf} } @misc{Konstantopoulos2009, author = {Konstantopoulos, Takis}, title = {Markov Chains and Random Walks}, year = {2009}, url = {https://www2.math.uu.se/~takis/public_html/McRw/mcrw.pdf} } @misc{montgomery, author = {James Montgomery}, title = {Communication Classes}, year = {2009}, url = {https://www.ssc.wisc.edu/~jmontgom/commclasses.pdf} } @manual{DTMCPackR, title = {\pkg{DTMCPack}: Suite of Functions Related to Discrete-Time Discrete-State Markov Chains}, author = {William Nicholson}, year = {2013}, note = {R package version 0.1-2}, url = {https://CRAN.R-project.org/package=DTMCPack} } @article{averyHenderson, author = {{P. J. Avery} and {D. A. 
Henderson}}, title = {Fitting Markov Chain Models to Discrete State Series}, journal = {Applied Statistics}, year = {1999}, volume = {48}, pages = {53-61}, number = {1} } @manual{rSoftware, title = {\proglang{R}: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2013}, url = {https://www.R-project.org/} } @manual{pkg:matlab, title = {\pkg{matlab}: \proglang{MATLAB} emulation package}, author = {P. Roebuck}, year = {2011}, note = {R package version 0.8.9}, url = {https://CRAN.R-project.org/package=matlab} } @misc{probBook, author = {Laurie Snell}, title = {Probability Book: chapter 11}, year = {1999}, url = {https://web.archive.org/web/20150721052835/http://www.dartmouth.edu/~chance/teaching_aids/books_articles/probability_book/Chapter11.pdf} } @article{pkg:markovchain, author = {Giorgio Alfredo Spedicato}, title = {{Discrete Time Markov Chains with R}}, year = {2017}, journal = {{The R Journal}}, url = {https://journal.r-project.org/articles/RJ-2017-036/index.html} } @manual{MAPmcR, title = {Bayesian Inference of First Order Markov Chains}, author = {Sai Bhargav Yalamanchi and Giorgio Alfredo Spedicato}, month = {06}, year = {2015}, note = {R package version 0.2.5} } @misc{wiki:markov, author = {Wikipedia}, title = {Markov chain --- Wikipedia{,} The Free Encyclopedia}, year = {2013}, note = {[Online; accessed 23-August-2013]}, url = {https://en.wikipedia.org/w/index.php?title=Markov_chain&oldid=568910294} } @manual{CreditMetricsR, title = {CreditMetrics: Functions for Calculating the CreditMetrics Risk Model}, author = {Andreas Wittmann}, year = {2007}, note = {\proglang{R} package version 0.0-2} } @manual{mathematica9, title = {\proglang{Mathematica}}, author = {Wolfram Research, Inc.}, organization = {Wolfram Research, Inc.}, edition = {ninth}, year = {2013}, adddress = {Champaign, Illinois} } @misc{mathematica9MarkovChain, author = 
{Wolfram Research, Inc.}, year = {2013}, organization = {Wolfram Research, Inc.}, url ={https://www.wolfram.com/mathematica/new-in-9/markov-chains-and-queues/structural-properties-of-finite-markov-processes.html} } @article{sch, author = {Christopher C. Strelioff and James P. Crutchfield and Alfred W. Hubler}, title = {Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-class Modeling}, journal = {Sante Fe Working Papers}, year = {2007}, url = {https://arxiv.org/abs/math/0703715/} } @article{mstateR, author = {Liesbeth C. de Wreede and Marta Fiocco and Hein Putter}, title = {\pkg{mstate}: An \proglang{R} Package for the Analysis of Competing Risks and Multi-State Models}, journal = {Journal of Statistical Software}, year = {2011}, volume = {38}, pages = {1--30}, number = {7}, url = {https://www.jstatsoft.org/v38/i07/} } @mastersthesis{MSkuriat, author = {Monika Skuriat-Olechnowska}, title = {Statistical inference and hypothesis testing for Markov chains with Interval Censoring}, school = {Delft University of Technology}, type = {diploma thesis}, year = {2005} } @misc{wiki:CI, author = {Wikipedia}, title = {Confidence interval --- Wikipedia{,} The Free Encyclopedia}, year = {2015}, url = {https://en.wikipedia.org/w/index.php?title=Confidence_interval&oldid=667353161}, note = {[Online; accessed 21-June-2015]} } @book{pardo2005statistical, title = {Statistical inference based on divergence measures}, author = {Pardo, Leandro}, year = {2005}, publisher = {CRC Press} } @article{anderson1957statistical, title = {Statistical inference about Markov chains}, author = {Anderson, Theodore W and Goodman, Leo A}, journal = {The Annals of Mathematical Statistics}, pages = {89--110}, year = {1957}, publisher = {JSTOR} } @article{ching2008higher, title = {Higher-order multivariate Markov chains and their applications}, author = {Ching, Wai-Ki and Ng, Michael K and Fung, Eric S}, journal = {Linear Algebra and its Applications}, volume = {428}, 
number = {2}, pages = {492--507}, year = {2008}, publisher = {Elsevier} } @incollection{ching2013higher, title = {Higher-order markov chains}, author = {Ching, Wai-Ki and Huang, Ximin and Ng, Michael K and Siu, Tak-Kuen}, booktitle = {Markov Chains}, pages = {141--176}, year = {2013}, publisher = {Springer} } @manual{pkg:Rsolnp, title = {Rsolnp: General Non-linear Optimization Using Augmented Lagrange Multiplier Method}, author = {Alexios Ghalanos and Stefan Theussl}, year = {2014}, note = {R package version 1.15.} } @PhdThesis{YinyuYe, title = {Interior Algorithms for Linear, Quadratic, and Linearly Constrained Non-Linear Programming}, author = {Yinyu Ye}, year = {1987}, school = {Department of {ESS}, Stanford University} } @book{NorrisBook, title = {Markovchains}, author = {J. R. Norris}, year = {1998}, publisher = {Cambridge University Press} } @article{ictmcpaper, author = {Thomas Krak, Jasper De Bock, Arno Siebes}, title = {Imprecise Continuous Time Markov Chains}, journal = {International Journal of Approximate Reasoning}, year = {2017}, volume = {88}, pages = {452-528}, publisher = {arXiv} } @misc{committorlink, author = {StackOverflow}, url = {https://math.stackexchange.com/questions/1450399/probability-that-a-chain-will-enter-state-5-before-it-enters-state-3?newreg=82f90b66b949495a91661caad24db915}, year = {2015} } @book{GallagerBook, title = {Stochastic Processes: Theory for Applications}, author = {Robert G. Gallager}, year = {2013}, publisher = {Cambridge University Press} } @misc{Sigman, title = {Continuous Time markovchains}, author = {Karl Sigman}, year = {2009}, publisher = {Columbia University} } @article{freqArticle, author = {Alexander Kreinin, Marina Sidelnikova}, title = {Regularization Algorithms for Transition Matrices}, journal = {Algo Research Quarterly}, year = {2001}, volume = {4}, number = {1/2}, pages = {23--40}, month = mar, publisher = {ALGO RESEARCH QUARTERLY} } @book{GrinsteadSnell, author = {Grinstead, Charles M. 
and Snell, Laurie J.}, booktitle = {Introduction to Probability}, edition = {Version dated 4 July 2006}, publisher = {American Mathematical Society}, title = {{Grinstead and Snell's Introduction to Probability}}, url = {https://math.dartmouth.edu/~prob/prob/prob.pdf}, year = {2006} } @article{noe_constructing_2009, title = {Constructing the equilibrium ensemble of folding pathways from short off-equilibrium simulations}, volume = {106}, copyright = {© 2009 . Freely available online through the PNAS open access option.}, issn = {0027-8424, 1091-6490}, url = {https://www.pnas.org/content/106/45/19011}, doi = {10.1073/pnas.0905466106}, language = {en}, number = {45}, urldate = {2019-01-13}, journal = {Proceedings of the National Academy of Sciences}, author = {Noé, Frank and Schütte, Christof and Vanden-Eijnden, Eric and Reich, Lothar and Weikl, Thomas R.}, month = nov, year = {2009}, pmid = {19887634}, keywords = {committor}, pages = {19011--19016} } @article{Tarjan, doi = {10.1137/0201010}, url = {https://doi.org/10.1137/0201010}, year = {1972}, month = jun, publisher = {Society for Industrial {\&} Applied Mathematics ({SIAM})}, volume = {1}, number = {2}, pages = {146--160}, author = {Robert Tarjan}, title = {Depth-First Search and Linear Graph Algorithms}, journal = {{SIAM} Journal on Computing} } markovchain/vignettes/higher_order_markov_chains.Rmd0000644000176200001440000003006615137702633022611 0ustar liggesusers--- title: "Higher order Markov chains" pagetitle: "Higher order Markov chains" output: rmarkdown::html_vignette author: - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Giorgio Alfredo Spedicato" 
affiliation: Ph.D FCAS FSA CSPA C.Stat, Unipol Group address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedicato\_giorgio@yahoo.it} preamble: > \author{Deepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato} \usepackage{graphicx} \usepackage{amsmath} \usepackage{tabularx} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} abstract: | The \pkg{markovchain} package contains functions to fit higher (possibly) multivariate order Markov chains. The functions are shown as well as simple examples. vignette: > %\VignetteIndexEntry{Higher order Markov chains} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} keywords: plain: [Higher order Markov chains] formatted: [Higher order Markov chains] bibliography: markovchainBiblio.bib --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ``` # Higher Order Markov Chains Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package. An experimental `fitHigherOrder` function has been written in order to fit a higher order Markov chain ([@ching2013higher, @ching2008higher]). `fitHigherOrder` takes two inputs 1. sequence: a categorical data sequence. 2. order: order of Markov chain to fit with default value 2. The output will be a `list` which consists of 1. lambda: model parameter(s). 2. Q: a list of transition matrices. $Q_i$ is the $i$th step transition matrix stored column-wise. 3. X: frequency probability vector of the given sequence. Its quadratic programming problem is solved using the `solnp` function of the Rsolnp package [@pkg:Rsolnp]. 
```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, comment = "#>") ``` ```{r setup_2, include=FALSE, message=FALSE, echo=FALSE} require(markovchain) ``` ```{r higherOrder} if (requireNamespace("Rsolnp", quietly = TRUE)) { library(Rsolnp) data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) } ``` # Higher Order Multivariate Markov Chains ## Introduction HOMMC model is used for modeling behaviour of multiple categorical sequences generated by similar sources. The main reference is [@ching2008higher]. Assume that there are s categorical sequences and each has possible states in M. In nth order MMC the state probability distribution of the jth sequence at time $t = r + 1$ depend on the state probability distribution of all the sequences (including itself) at times $t = r, r - 1, ..., r - n + 1$. \[ x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ... \] with initial distribution $x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)$. Here \[ \lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace and \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s. \] Now we will see the simpler representation of the model which will help us understand the result of `fitHighOrderMultivarMC` method. \vspace{5mm} Let $X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T} for \enspace j = 1, 2, 3, ... 
, s.$ Then \vspace{5mm} \[ \begin{pmatrix} X_{r+1}^{(1)}\\ X_{r+1}^{(2)}\\ .\\ .\\ .\\ X_{r+1}^{(s)} \end{pmatrix} = \begin{pmatrix} B^{11}& B^{12}& .& .& B^{1s}& \\ B^{21}& B^{22}& .& .& B^{2s}& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ B^{s1}& B^{s2}& .& .& B^{ss}& \\ \end{pmatrix} \begin{pmatrix} X_{r}^{(1)}\\ X_{r}^{(2)}\\ .\\ .\\ .\\ X_{r}^{(s)} \end{pmatrix} \textrm{where} \] \[B^{ii} = \begin{pmatrix} \lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\ I& 0& .& .& 0& \\ 0& I& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& I& 0& \end{pmatrix}_{mn*mn} \textrm{and} \] \vspace{5mm} \[ B^{ij} = \begin{pmatrix} \lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\ 0& 0& .& .& 0& \\ 0& 0& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& 0& 0& \end{pmatrix}_{mn*mn} \textrm{when } i\neq j. \] \vspace{5mm} ## Representation of parameters in the code $P_{h}^{(ij)}$ is represented as $Ph(i,j)$ and $\lambda _{ij}^{(h)}$ as Lambdah(i,j). For example: $P_{2}^{(13)}$ as $P2(1,3)$ and $\lambda _{45}^{(3)}$ as Lambda3(4,5). ## Definition of HOMMC class ```{r hommcObject} showClass("hommc") ``` Any element of `hommc` class is comprised by following slots: 1. states: a character vector, listing the states for which transition probabilities are defined. 2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column. 3. order: order of Multivariate Markov chain. 4. P: an array of all transition matrices. 5. Lambda: a vector to store the weightage of each transition matrix. 6. 
name: optional character element to name the HOMMC ## How to create an object of class HOMMC ```{r hommcCreate} states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ``` ## Fit HOMMC The `fitHighOrderMultivarMC` method is available to fit HOMMC. Below are the 3 parameters of this method. 1. seqMat: a character matrix or a data frame, each column represents a categorical sequence. 2. order: order of Multivariate Markov chain. Default is 2. 3. Norm: Norm to be used. Default is 2. # A Marketing Example We tried to replicate the example found in [@ching2008higher] for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in the state of overflow or near capacity. The company is thus in urgent need to study the interplay between the storage space requirement and the overall growing sales demand. The products can be classified into six possible states (1, 2, 3, 4, 5, 6) according to their sales volumes. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data is contained in the `sales` object. 
```{r hommsales} data(sales) head(sales) ``` The company would also like to predict sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. Customer's sales demand sequences of five important products of the company for a year. We expect sales demand sequences generated by the same customer to be correlated to each other. Therefore by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, hence obtain better prediction rules. In [@ching2008higher] application, they choose the order arbitrarily to be eight, i.e., n = 8. We first estimate all the transition probability matrices $P_{h}^{ij}$ and we also have the estimates of the stationary probability distributions of the five products:. $\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}$ By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model: \vspace{3mm} $\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(3)} = 
\boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r}^{(5)}$ \vspace{3mm} According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Products D and E depend on Product B at order 8; this relationship would hardly be obtained in a conventional Markov model owing to its huge number of parameters. The results show that the higher-order multivariate Markov model is quite useful for analyzing the relationships among sales demands. ```{r hommcFit, warning = FALSE, message = FALSE} # fit 8th order multivariate markov chain if (requireNamespace("Rsolnp", quietly = TRUE)) { object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) } ``` We choose to show only the results reported in the paper. We see that $\lambda$ values are quite close, but not equal, to those shown in the original paper. 
```{r result, echo = FALSE} if (requireNamespace("Rsolnp", quietly = TRUE)) { i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } } else { print("package Rsolnp unavailable") } ``` # References markovchain/vignettes/an_introduction_to_markovchain_package.Rmd0000644000176200001440000032063515137702633025206 0ustar liggesusers--- title: plain: "The markovchain Package: A Package for Easily Handling Discrete Markov Chains in R" formatted: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}" short: "\\pkg{markovchain} package: discrete Markov chains in \\proglang{R}" pagetitle: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}" author: - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat FCAS, FSA, CSPA Unipol Group address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedygiorgio@gmail.com} url: www.statisticaladvisor.com - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Sai Bhargav Yalamanchi" affiliation: B-Tech student, Electrical Engineering address: > Indian Institute of Technology, Bombay Mumbai - 400 076, India email: \email{bhargavcoolboy@gmail.com} - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering 
address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Ignacio Cordón" affiliation: Software Engineer address: > Madrid (Madrid), Spain email: \email{nacho.cordon.castillo@gmail.com} preamble: > \author{\small{Giorgio Alfredo Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi, Deepak Yadav, Ignacio Cordón}} \Plainauthor{G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón} \usepackage{graphicx} \usepackage{amsmath} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} \usepackage{amsfonts} abstract: | The \pkg{markovchain} package aims to fill a gap within the \proglang{R} framework providing S4 classes and methods for easily handling discrete time Markov chains, homogeneous and simple inhomogeneous ones as well as continuous time Markov chains. The S4 classes for handling and analysing discrete and continuous time Markov chains are presented, as well as functions and method for performing probabilistic and statistical analysis. Finally, some examples in which the package's functions are applied to Economics, Finance and Natural Sciences topics are shown. output: if (rmarkdown::pandoc_version() < "2.7") function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) 
} vignette: > %\VignetteIndexEntry{An introduction to markovchain package} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] formatted: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ``` # Introduction Markov chains represent a class of stochastic processes of great interest for the wide spectrum of practical applications. In particular, discrete time Markov chains (DTMC) permit to model the transition probabilities between discrete states by the aid of matrices.Various \proglang{R} packages deal with models that are based on Markov chains: * \pkg{msm} [@msmR] handles Multi-State Models for panel data. * \pkg{mcmcR} [@mcmcR] implements Monte Carlo Markov Chain approach. * \pkg{hmm} [@hmmR] fits hidden Markov models with covariates. * \pkg{mstate} fits `Multi-State Models based on Markov chains for survival analysis [@mstateR]. 
Nevertheless, the \proglang{R} statistical environment [@rSoftware] seems to lack a simple package that coherently defines S4 classes for discrete Markov chains and allows to perform probabilistic analysis, statistical inference and applications. For the sake of completeness, \pkg{markovchain} is the second package specifically dedicated to DTMC analysis, being \pkg{DTMCPack} [@DTMCPackR] the first one. Notwithstanding, \pkg{markovchain} package [@pkg:markovchain] aims to offer more flexibility in handling DTMC than other existing solutions, providing S4 classes for both homogeneous and semi-homogeneous Markov chains as well as methods suited to perform statistical and probabilistic analysis. The \pkg{markovchain} package depends on the following \proglang{R} packages: \pkg{expm} [@expmR] to perform efficient matrices powers; \pkg{igraph} [@pkg:igraph] to perform pretty plotting of `markovchain` objects and \pkg{matlab} [@pkg:matlab], that contains functions for matrix management and calculations that emulate those within \proglang{MATLAB} environment. Moreover, other scientific softwares provide functions specifically designed to analyze DTMC, as \proglang{Mathematica} 9 [@mathematica9]. The paper is structured as follows: Section \@ref(sec:mathematics) briefly reviews mathematics and definitions regarding DTMC, Section \@ref(sec:structure) discusses how to handle and manage Markov chain objects within the package, Section \@ref(sec:probability) and Section \@ref(sec:statistics) show how to perform probabilistic and statistical modelling, while Section \@ref(sec:applications) presents some applied examples from various fields analyzed by means of the \pkg{markovchain} package. # Review of core mathematical concepts {#sec:mathematics} ## General Definitions A DTMC is a sequence of random variables $X_{1},\: X_{2}\: ,\ldots,\:X_{n},\ldots$ characterized by the Markov property (also known as memoryless property, see Equation \ref{eq:markovProp}). 
The Markov property states that the distribution of the forthcoming state $X_{n+1}$ depends only on the current state $X_{n}$ and doesn't depend on the previous ones $X_{n-1},\: X_{n-2},\ldots,\: X_{1}$. \begin{equation} Pr\left(X_{n+1}=x_{n+1}\left|X_{1}=x_{1},X_{2}=x_{2,}...,X_{n}=x_{n}\right.\right)=Pr\left(X_{n+1}=x_{n+1}\left|X_{n}=x_{n}\right.\right). \label{eq:markovProp} \end{equation} The set of possible states $S=\left\{ s_{1},s_{2},...,s_{r}\right\}$ of $X_{n}$ can be finite or countable and it is named the state space of the chain. The chain moves from one state to another (this change is named either 'transition' or 'step') and the probability $p_{ij}$ to move from state $s_{i}$ to state $s_{j}$ in one step is named transition probability: \begin{equation} p_{ij}=Pr\left(X_{1}=s_{j}\left|X_{0}=s_{i}\right.\right). \label{eq:trProp} \end{equation} The probability of moving from state $i$ to $j$ in $n$ steps is denoted by $p_{ij}^{(n)}=Pr\left(X_{n}=s_{j}\left|X_{0}=s_{i}\right.\right)$. A DTMC is called time-homogeneous if the property shown in Equation \ref{eq:mcHom} holds. Time homogeneity implies no change in the underlying transition probabilities as time goes on. \begin{equation} Pr\left(X_{n+1}=s_{j}\left|X_{n}=s_{i}\right.\right)=Pr\left(X_{n}=s_{j}\left|X_{n-1}=s_{i}\right.\right). \label{eq:mcHom} \end{equation} If the Markov chain is time-homogeneous, then $p_{ij}=Pr\left(X_{k+1}=s_{j}\left|X_{k}=s_{i}\right.\right)$ and \newline $p_{ij}^{(n)}=Pr\left(X_{n+k}=s_{j}\left|X_{k}=s_{i}\right.\right)$, where $k>0$. The probability distribution of transitions from one state to another can be represented into a transition matrix $P=(p_{ij})_{i,j}$, where each element of position $(i,j)$ represents the transition probability $p_{ij}$. 
E.g., if $r=3$ the transition matrix $P$ is shown in Equation \ref{eq:trPropEx} \begin{equation} P=\left[\begin{array}{ccc} p_{11} & p_{12} & p_{13}\\ p_{21} & p_{22} & p_{23}\\ p_{31} & p_{32} & p_{33} \end{array}\right]. \label{eq:trPropEx} \end{equation} The distribution over the states can be written in the form of a stochastic row vector $x$ (the term stochastic means that $\sum_{i}x_{i}=1, x_{i} \geq 0$): e.g., if the current state of $x$ is $s_{2}$, $x=\left(0\:1\:0\right)$. As a consequence, the relation between $x^{(1)}$ and $x^{(0)}$ is $x^{(1)}=x^{(0)}P$ and, recursively, we get $x^{(2)}=x^{(0)}P^{2}$ and $x^{(n)}=x^{(0)}P^{n},\, n>0$. DTMC are explained in most theory books on stochastic processes, see \cite{bremaud1999discrete} and \cite{dobrow2016introduction} for example. Valuable references online available are: \cite{konstantopoulos2009markov}, \cite{probBook} and \cite{bardPpt}. ## Properties and classification of states {#sec:properties} A state $s_{j}$ is said accessible from state $s_{i}$ (written $s_{i}\rightarrow s_{j}$) if a system starting in state $s_{i}$ has a positive probability to reach the state $s_{j}$ at a certain point, i.e., $\exists n>0:\: p_{ij}^{n}>0$. If both $s_{i}\rightarrow s_{j}$ and $s_{j}\rightarrow s_{i}$, then $s_{i}$ and $s_{j}$ are said to communicate. A communicating class is defined to be a set of states that communicate. A DTMC can be composed by one or more communicating classes. If the DTMC is composed by only one communicating class (i.e., if all states in the chain communicate), then it is said irreducible. A communicating class is said to be closed if no states outside of the class can be reached from any state inside it. If $p_{ii}=1$, $s_{i}$ is defined as absorbing state: an absorbing state corresponds to a closed communicating class composed by one state only. 
The canonical form of a DTMC transition matrix is a matrix having a block form, where the closed communicating classes are shown at the beginning of the diagonal matrix. A state $s_{i}$ has period $k_{i}$ if any return to state $s_{i}$ must occur in multiples of $k_{i}$ steps, that is $k_{i}=gcd\left\{ n:Pr\left(X_{n}=s_{i}\left|X_{0}=s_{i}\right.\right)>0\right\}$, where $gcd$ is the greatest common divisor. If $k_{i}=1$ the state $s_{i}$ is said to be aperiodic, else if $k_{i}>1$ the state $s_{i}$ is periodic with period $k_{i}$. Loosely speaking, $s_{i}$ is periodic if it can only return to itself after a fixed number of transitions $k_{i}>1$ (or multiple of $k_{i}$), else it is aperiodic. If states $s_{i}$ and $s_{j}$ belong to the same communicating class, then they have the same period $k_{i}$. As a consequence, each of the states of an irreducible DTMC share the same periodicity. This periodicity is also considered the DTMC periodicity. It is possible to classify states according to their periodicity. Let $T^{x\rightarrow x}$ be the number of steps needed to go back to state $x$, knowing that the chain starts in $x$. * A state $x$ is recurrent if $P(T^{x\rightarrow x}<+\infty)=1$ (equivalently $P(T^{x\rightarrow x}=+\infty)=0$). In addition: 1. A state $x$ is null recurrent if in addition $E(T^{x\rightarrow x})=+\infty$. 2. A state $x$ is positive recurrent if in addition $E(T^{x\rightarrow x})<+\infty$. 3. A state $x$ is absorbing if in addition $P(T^{x\rightarrow x}=1)=1$. * A state $x$ is transient if $P(T^{x\rightarrow x}<+\infty)<1$ (equivalently $P(T^{x\rightarrow x}=+\infty)>0$). It is possible to analyze the timing to reach a certain state. The first passage time (or hitting time) from state $s_{i}$ to state $s_{j}$ is the number $T_{ij}$ of steps taken by the chain until it arrives for the first time to state $s_{j}$, given that $X_{0} = s_{i}$.
The probability distribution of $T_{ij}$ is defined by Equation \ref{eq:fpt1} \begin{equation} {h_{ij}}^{\left( n \right)} = Pr\left( {T_{ij} = n} \right) = Pr\left( X_n = s_j,X_{n - 1} \ne s_{j}, \ldots ,X_1 \ne s_j |X_0 = s_i \right) \label{eq:fpt1} \end{equation} and can be found recursively using Equation \ref{eq:ftp2}, given that ${h_{ij}}^{\left( n \right)} = p_{ij}$. \begin{equation} {h_{ij}}^{\left( n \right)} = \sum\limits_{k \in S - \left\{ s_{j} \right\}}^{} {{p_{ik}}{h_{kj}}^{\left( {n - 1} \right)}}. \label{eq:ftp2} \end{equation} A commonly used quantity related to $h$ is its average value, i.e. the \emph{mean first passage time} (also expected hitting time), namely $\bar h_{ij}= \sum_{n=1\dots\infty} n \,h_{ij}^{(n)}$. If in the definition of the first passage time we let $s_{i}=s_{j}$, we obtain the first recurrence time $T_{i}=\inf \{ n\geq1:X_{n}=s_{i}|X_{0}=s_{i} \}$. We could also ask ourselves which is the *mean recurrence time*, an average of the mean first recurrence times: \[ r_i = \sum_{k = 1}^{\infty} k \cdot P(T_i = k) \] Revisiting the definition of recurrence and transience: a state $s_{i}$ is said to be recurrent if it is visited infinitely often, i.e., $Pr(T_{i}<+\infty|X_{0}=s_{i})=1$. On the opposite, $s_{i}$ is called transient if there is a positive probability that the chain will never return to $s_{i}$, i.e., $Pr(T_{i}=+\infty|X_{0}=s_{i})>0$. Given a time homogeneous Markov chain with transition matrix \emph{P}, a stationary distribution \emph{z} is a stochastic row vector such that $z=z\cdot P$, where $0\leq z_{j}\leq 1 \: \forall j$ and $\sum_{j}z_{j}=1$. If a DTMC $\{X_{n}\}$ is irreducible and aperiodic, then it has a limit distribution and this distribution is stationary. 
As a consequence, if $P$ is the $k\times k$ transition matrix of the chain and $z=\left(z_{1},...,z_{k}\right)$ is the unique eigenvector of $P$ such that $\sum_{i=1}^{k}z_{i}=1$, then we get \begin{equation} \underset{n\rightarrow\infty}{lim}P^{n}=Z, \label{eq:limMc} \end{equation} where $Z$ is the matrix having all rows equal to $z$. The stationary distribution of $\{X_{n}\}$ is represented by $z$. A matrix $A$ is called primitive if all of its entries are strictly positive, and we write it $A > 0$. If the transition matrix $P$ for a DTMC has some primitive power, i.e. there exists $m > 0: P^m > 0$, then the DTMC is said to be regular. In fact being regular is equivalent to being irreducible and aperiodic. All regular DTMCs are irreducible. The converse is not true. Given two absorbing states $s_A$ (source) and $s_B$ (sink), the \emph{committor probability} $q_j^{(AB)}$ is the probability that a process starting in state $s_j$ is absorbed in state $s_B$ (rather than $s_A$) [@noe_constructing_2009]. It can be computed via \begin{equation} q_j^{(AB)} = \sum_{k \notin \{A, B\}} P_{jk}q_k^{(AB)} \quad \mbox{with} \quad q_A^{(AB)} = 0 \quad \mbox{and} \quad q_B^{(AB)} = 1 \end{equation} Note we can also define the hitting probability from $i$ to $j$ as the probability of ever reaching the state $j$ if our initial state is $i$: \begin{equation} h_{i,j} = Pr(T_{ij} < \infty) = \sum_{n = 0}^{\infty} h_{ij}^{(n)} \label{eq:hitting-probs} \end{equation} In a DTMC with a finite set of states, we know that a transient state communicates at least with one recurrent state. If the chain starts in a transient element, once it hits a recurrent state, it is going to be caught in its recurrent state, and we cannot expect it would go back to the initial state.
Given a transient state $i$ we can define the *absorption probability* to the recurrent state $j$ as the probability that the first recurrent state that the Markov chain visits (and therefore gets absorbed by its recurrent class) is $j$, $f^{*}_{ij}$. We can also define the *mean absorption time* as the mean number of steps the transient state $i$ would take until it hits any recurrent state, $b_i$. ## A short example Consider the following numerical example. Suppose we have a DTMC with a set of 3 possible states $S=\{s_{1}, s_{2}, s_{3}\}$. Let the transition matrix be: \begin{equation} P=\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]. \label{eq:trPropExEx1} \end{equation} In $P$, $p_{11}=0.5$ is the probability that $X_{1}=s_{1}$ given that we observed $X_{0}=s_{1}$, and so on. It is easy to see that the chain is irreducible since all the states communicate (it is made of one communicating class only). Suppose that the current state of the chain is $X_{0}=s_{2}$, i.e., $x^{(0)}=(0\:1\:0)$, then the probability distribution of states after 1 and 2 steps can be computed as shown in Equations \@ref(eq:trPropExEx2) and \@ref(eq:trPropExEx3). \begin{equation} x^{(1)}=\left(0\:1\:0\right)\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]=\left(0.15\:0.45\:0.4\right). \label{eq:trPropExEx2} \end{equation} \begin{equation} x^{(n)}=x^{(n-1)}P \to \left(0.15\:0.45\:0.4\right)\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]=\left(0.2425\:0.3725\:0.385\right). \label{eq:trPropExEx3} \end{equation} If we were interested in the probability of being in the state $s_{3}$ in the second step, then $Pr\left(X_{2}=s_{3}\left|X_{0}=s_{2}\right.\right)=0.385$.
\newpage # The structure of the package {#sec:structure} ## Creating markovchain objects The package is loaded within the \proglang{R} command line as follows: ```{r, load, results='hide', message=FALSE} library("markovchain") ``` The `markovchain` and `markovchainList` S4 classes [@chambers] are defined within the \pkg{markovchain} package as displayed: ```{r, showClass, echo=FALSE} showClass("markovchain") showClass("markovchainList") ``` The first class has been designed to handle homogeneous Markov chain processes, while the latter (which is itself a list of `markovchain` objects) has been designed to handle semi-homogeneous Markov chains processes. Any element of `markovchain` class is comprised by following slots: 1. `states`: a character vector, listing the states for which transition probabilities are defined. 2. `byrow`: a logical element, indicating whether transition probabilities are shown by row or by column. 3. `transitionMatrix`: the probabilities of the transition matrix. 4. `name`: optional character element to name the DTMC. The `markovchainList` objects are defined by following slots: 1. `markovchains`: a list of `markovchain` objects. 2. `name`: optional character element to name the DTMC. 
The `markovchain` objects can be created either in a long way, as the following code shows ```{r mcInitLong} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ``` or in a shorter way, displayed below ```{r mcInitShort} mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ``` When `new("markovchain")` is called alone, a default Markov chain is created. ```{r defaultMc} defaultMc <- new("markovchain") ``` The quicker way to create `markovchain` objects is made possible thanks to the implemented `initialize` S4 method that checks that: * the `transitionMatrix`, either of class matrix or Matrix, to be a transition matrix, i.e., all entries to be probabilities and either all rows or all columns to sum up to one. * the columns and rows names of `transitionMatrix` to be defined and to coincide with `states` vector slot. The `markovchain` objects can be collected in a list within `markovchainList` S4 objects as following example shows. ```{r intromcList} mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ``` ## Handling markovchain objects Table \@ref(tab:methodsToHandleMc) lists which methods handle and manipulate `markovchain` objects. 
\begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Purpose \\ \hline \hline \code{*} & Direct multiplication for transition matrices.\\ \code{\textasciicircum{}} & Compute the power \code{markovchain} of a given one.\\ \code{[} & Direct access to the elements of the transition matrix.\\ \code{==} & Equality operator between two transition matrices.\\ \code{!=} & Inequality operator between two transition matrices.\\ \code{as} & Operator to convert \code{markovchain} objects into \code{data.frame} and\\ & \code{table} object.\\ \code{dim} & Dimension of the transition matrix.\\ \code{names} & Equal to \code{states}.\\ \code{names<-} & Change the \code{states} name.\\ \code{name} & Get the name of \code{markovchain object}.\\ \code{name<-} & Change the name of \code{markovchain object}.\\ \code{plot} & \code{plot} method for \code{markovchain} objects.\\ \code{print} & \code{print} method for \code{markovchain} objects.\\ \code{show} & \code{show} method for \code{markovchain} objects.\\ \code{sort} & \code{sort} method for \code{markovchain} objects, in terms of their states.\\ \code{states} & Name of the transition states.\\ \code{t} & Transposition operator (which switches \code{byrow} `slot value and modifies \\ & the transition matrix coherently).\\ \hline \end{tabular} \caption{\pkg{markovchain} methods for handling \code{markovchain} objects.} \label{tab:methodsToHandleMc} \end{table} The examples that follow shows how operations on `markovchain` objects can be easily performed. For example, using the previously defined matrix we can find what is the probability distribution of expected weather states in two and seven days, given the actual state to be cloudy. ```{r operations} initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ``` A similar answer could have been obtained defining the vector of probabilities as a column vector. 
A column - defined probability matrix could be set up either creating a new matrix or transposing an existing `markovchain` object thanks to the `t` method. ```{r operations2} initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ``` The initial state vector previously shown can not necessarily be a probability vector, as the code that follows shows: ```{r fval} fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ``` Basic methods have been defined for `markovchain` objects to quickly get states and transition matrix dimension. ```{r otherMethods} states(mcWeather) names(mcWeather) dim(mcWeather) ``` Methods are available to set and get the name of `markovchain` object. ```{r otherMethods2} name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ``` Also it is possible to alphabetically sort the transition matrix: ```{r sortMethod} markovchain:::sort(mcWeather) ``` A direct access to transition probabilities is provided both by `transitionProbability` method and `"["` method. ```{r transProb} transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ``` The transition matrix of a `markovchain` object can be displayed using `print` or `show` methods (the latter being less verbose). Similarly, the underlying transition probability diagram can be plotted by the use of `plot` method (as shown in Figure \@ref(fig:mcPlot)) which is based on \pkg{igraph} package [@pkg:igraph]. `plot` method for `markovchain` objects is a wrapper of `plot.igraph` for `igraph` S4 objects defined within the \pkg{igraph} package. Additional parameters can be passed to `plot` function to control the network graph layout. 
There are also \pkg{diagram} and \pkg{DiagrammeR} ways available for plotting as shown in Figure \@ref(fig:mcPlotdiagram). The `plot` function also uses `communicatingClasses` function to separate out states of different communicating classes. All states that belong to one class have same color. ```{r printAndShow} print(mcWeather) show(mcWeather) ``` ```{r mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"} if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ``` ```{r mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"} if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ``` Import and export from some specific classes is possible, as shown in Figure \@ref(fig:fromAndTo) and in the following code. ```{r exportImport1} mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ``` ```{r exportImport2} if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ``` from etm (now archived as of September 2020): ```{r exporImport3} if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, 
"markovchain") etm2mc } else { message("etm unavailable") } ``` ```{r fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"} library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ``` Coerce from `matrix` method, as the code below shows, represents another approach to create a `markovchain` method starting from a given squared probability matrix. ```{r exportImport4} myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ``` Semi-homogeneous Markov chains can be created with the aid of `markovchainList` object. The example that follows arises from health insurance, where the costs associated to patients in a Continuous Care Health Community (CCHC) are modeled by a semi-homogeneous Markov Chain, since the transition probabilities change by year. Methods explicitly written for `markovchainList` objects are: `print`, `show`, `dim` and `[`. 
```{r cchcMcList} stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ``` It is possible to perform direct access to `markovchainList` elements, as well as to determine the number of `markovchain` objects by which a `markovchainList` object is composed. ```{r cchcMcList2} mcCCRC[[1]] dim(mcCCRC) ``` The `markovchain` package contains some data found in the literature related to DTMC models (see Section \@ref(sec:applications). Table \@ref(tab:datasets) lists datasets and tables included within the current release of the package. \begin{table}[h] \centering \begin{tabular}{p{0.2\textwidth}p{0.75\textwidth}} \hline Dataset & Description \\ \hline \hline \code{blanden} & Mobility across income quartiles, \cite{blandenEtAlii}.\\ \code{craigsendi} & CD4 cells, \cite{craigSendi}.\\ \code{kullback} & raw transition matrices for testing homogeneity, \cite{kullback1962tests}.\\ \code{preproglucacon} & Preproglucacon DNA basis, \cite{averyHenderson}.\\ \code{rain} & Alofi Island rains, \cite{averyHenderson}.\\ \code{holson} & Individual states trajectories.\\ \code{sales} & Sales of six beverages in Hong Kong \cite{ching2008higher}. 
\\ \hline \end{tabular} \caption{The \pkg{markovchain} \code{data.frame} and \code{table}.} \label{tab:datasets} \end{table} Finally, Table \@ref(tab:demos) lists the demos included in the demo directory of the package. \begin{table}[h] \centering \begin{tabular}{lll} \hline R Code File & Description \\ \hline \hline \code{bard.R} & Structural analysis of Markov chains from Bard PPT.\\ \code{examples.R} & Notable Markov chains, e.g., The Gambler Ruin chain.\\ \code{quickStart.R} & Generic examples.\\ \code{extractMatrices.R} & Generic examples.\\ \hline \end{tabular} \caption{The \pkg{markovchain} demos.} \label{tab:demos} \end{table} # Probability with markovchain objects {#sec:probability} The \pkg{markovchain} package contains functions to analyse DTMC from a probabilistic perspective. For example, the package provides methods to find stationary distributions and identifying absorbing and transient states. Many of these methods come from \proglang{MATLAB} listings that have been ported into \proglang{R}. For a full description of the underlying theory and algorithm the interested reader can overview the original \proglang{MATLAB} listings, \cite{renaldoMatlab} and \cite{montgomery}. Table \@ref(tab:methodsToStats) shows methods that can be applied on `markovchain` objects to perform probabilistic analysis. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Returns \\ \hline \hline \code{absorbingStates} & the absorbing states of the transition matrix, if any.\\ \code{steadyStates} & the vector(s) of steady state(s) in matrix form. \\ \code{meanFirstPassageTime} & matrix or vector of mean first passage times. \\ \code{meanRecurrenceTime} & vector of mean number of steps to return to each recurrent state \\ \code{hittingProbabilities} & matrix of hitting probabilities for a Markov chain. 
\\ \code{meanAbsorptionTime} & expected number of steps for a transient state to be \\ & absorbed by any recurrent class \\ \code{absorptionProbabilities} & probabilities of transient states of being \\ & absorbed by each recurrent state \\ \code{committorAB} & committor probabilities \\ \code{communicatingClasses} & list of communicating classes. \\ & $s_{j}$, given actual state $s_{i}$. \\ \code{canonicForm} & the transition matrix into canonic form. \\ \code{is.accessible} & checks whether a state j is reachable from state i. \\ \code{is.irreducible} & checks whether a DTMC is irreducible. \\ \code{is.regular} & checks whether a DTMC is regular. \\ \code{period} & the period of an irreducible DTMC. \\ \code{recurrentClasses} & list of recurrent communicating classes. \\ \code{transientClasses} & list of transient communicating classes. \\ \code{recurrentStates} & the recurrent states of the transition matrix. \\ \code{transientStates} & the transient states of the transition matrix, if any. \\ \code{summary} & DTMC summary. \\ \hline \end{tabular} \caption{\pkg{markovchain} methods: statistical operations.} \label{tab:methodsToStats} \end{table} ## Conditional distributions The conditional distribution of weather states, given that current day's weather is sunny, is given by following code. ```{r conditionalDistr} conditionalDistribution(mcWeather, "sunny") ``` ## Stationary states A stationary (steady state, or equilibrium) vector is a probability vector such that Equation \ref{eq:steadystat2} holds \begin{equation} \begin{matrix} 0\leq \pi_j \leq 1\\ \sum_{j \in S} \pi_j = 1\\ \pi \cdot P = \pi \end{matrix} \label{eq:steadystat2} \end{equation} Steady states are associated to $P$ eigenvalues equal to one. 
We could be tempted to compute them solving the eigen values / vectors of the matrix and taking real parts (since if $u + iv$ is a eigen vector, for the matrix $P$, then $Re(u + iv) = u$ and $Im(u + iv) = v$ are eigen vectors) and normalizing by the vector sum, this carries some concerns: 1. If $u, v \in \mathbb{R}^n$ are linearly independent eigen vectors associated to $1$ eigen value, $u + iv$, $u + iu$ are also linearly independent eigen vectors, and their real parts coincide. Clearly if we took real parts, we would be loosing an eigen vector, because we cannot know in advance if the underlying algorithm to compute the eigen vectors is going to output something similar to what we described. We should be agnostic to the underlying eigen vector computation algorithm. 2. Imagine the identity $P$ of dimensions $2 \times 2$. Its eigen vectors associated to the $1$ eigen value are $u = (1, 0)$ and $v = (0, 1)$. However, the underlying algorithm to compute eigen vectors could return $(1, -2)$ and $(-2, 1)$ instead, that are linear combinations of the aforementioned ones, and therefore eigen vectors. Normalizing by their sum, we would get: $(-1, 2)$ and $(2, -1)$, which obviously are not probability measures. Again, we should be agnostic to the underlying eigen computation algorithm. 3. Algorithms to compute eigen values / vectors are computationally expensive: they are iterative, and we cannot predict a fixed number of iterations for them. Moreover, each iteration takes $\mathcal{O}(m^2)$ or $\mathcal{O}(m^3)$ algorithmic complexity, with $m$ the number of states. We are going to use that every irreducible DTMC has a unique steady state, that is, if $M$ is the matrix for an irreducible DTMC (all states communicate with each other), then it exists a unique $v \in \mathbb{R}^m$ such that: \[ v \cdot M = v, \qquad \sum_{i = 1}^m v_i = 1 \] Also, we'll use that a steady state for a DTMC assigns $0$ to the transient states. 
The canonical form of a (by row) stochastic matrix looks like: \[ \left(\begin{array}{c|c|c|c|c} M_1 & 0 & 0 & \ldots & 0 \\ \hline 0 & M_2 & 0 & \ldots & 0 \\ \hline 0 & 0 & M_3 & \ldots & 0 \\ \hline \vdots & \vdots & \vdots & \ddots & \vdots \\ \hline A_1 & A_2 & A_3 & \ldots & R \end{array}\right) \] where $M_i$ corresponds to irreducible sub-chains, the blocks $A_i$ correspond to the transitions from transient states to each of the recurrent classes and $R$ are the transitions from the transient states to themselves. Also, we should note that a Markov chain has exactly the same number of steady states as recurrent classes. Therefore, we have coded the following algorithm ^[We would like to thank Prof. Christophe Dutang for his contributions to the development of this method. He coded a first improvement of the original `steadyStates` method and we could not have reached the current correctness without his previous work]: 1. Identify the recurrent classes $[C_1, \ldots, C_l]$ with \texttt{recurrentClasses} function. 2. Take each class $C_i$, compute the sub-matrix corresponding to it $M_i$. 3. Solve the system $v \cdot C_i = v, \, \sum_{j = 1}^{|C_i|} v_j = 1$ which has a unique solution, for each $i = 1, \ldots, l$. 4. Map each state $v_i$ to the original order in $P$ and assign a $0$ to the slots corresponding to transient states in the matrix. The result is returned in matrix form. ```{r steadyStates} steadyStates(mcWeather) ``` It is possible for a Markov chain to have more than one stationary distribution, as the gambler ruin example shows.
```{r gamblerRuin} gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) { m <- markovchain:::zeros(moneyMax + 1) m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1 states <- as.character(0:moneyMax) rownames(m) <- colnames(m) <- states for(i in 2:moneyMax){ m[i,i-1] <- 1 - prob m[i, i + 1] <- prob } new("markovchain", transitionMatrix = m, name = paste("Gambler ruin", moneyMax, "dim", sep = " ")) } mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5) steadyStates(mcGR4) ``` ## Classification of states Absorbing states are determined by means of `absorbingStates` method. ```{r absorbingStates} absorbingStates(mcGR4) absorbingStates(mcWeather) ``` The key function in methods which need knowledge about communicating classes, recurrent states, transient states, is `.commclassKernel`, which is a modification of Tarjan's algorithm from \cite{Tarjan}. This `.commclassKernel` method gets a transition matrix of dimension $n$ and returns a list of two items: 1. `classes`, an matrix whose $(i, j)$ entry is `true` if $s_i$ and $s_j$ are in the same communicating class. 2. `closed`, a vector whose $i$ -th entry indicates whether the communicating class to which $i$ belongs is closed. These functions are used by two other internal functions on which the `summary` method for `markovchain` objects works. The example matrix used in \cite{renaldoMatlab} well exemplifies the purpose of the function. ```{r renaldoMatrix1} P <- markovchain:::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ``` All states that pertain to a transient class are named "transient" and a specific method has been written to elicit them. 
```{r transientStates} transientStates(probMc) ``` `canonicForm` method that turns a Markov chain into its canonic form, reordering the states to have first the recurrent classes and then the transient states. ```{r probMc2Canonic} probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ``` The function `is.accessible` permits to investigate whether a state $s_{j}$ is accessible from state $s_i$, that is whether the probability to eventually reach $s_j$ starting from $s_{i}$ is greater than zero. ```{r isAccessible} is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ``` In Section \@ref(sec:properties) we observed that, if a DTMC is irreducible, all its states share the same periodicity. Then, the `period` function returns the periodicity of the DTMC, provided that it is irreducible. The example that follows shows how to find if a DTMC is reducible or irreducible by means of the function `is.irreducible` and, in the latter case, the method `period` is used to compute the periodicity of the chain. ```{r periodicity} E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ``` The example Markov chain found in \proglang{Mathematica} web site \citep{mathematica9MarkovChain} has been used, and is plotted in Figure \@ref(fig:mcMathematics). 
```{r mathematica9Mc} mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ``` ```{r mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. Markov chain plot."} plot(mathematicaMc, layout = layout.fruchterman.reingold) ``` ```{r mathematica9MC, echo=FALSE} summary(mathematicaMc) ``` ## First passage time distributions and means \cite{renaldoMatlab} provides code to compute first passage time (within $1,2,\ldots, n$ steps) given the initial state to be $i$. The \proglang{MATLAB} listings translated into \proglang{R} on which the `firstPassage` function is based are: ```{r fpTime1, eval=FALSE} .firstpassageKernel <- function(P, i, n){ G <- P H <- P[i,] E <- 1 - diag(size(P)[2]) for (m in 2:n) { G <- P %*% (G * E) H <- rbind(H, G[i,]) } return(H) } ``` We conclude that the probability for the *first* rainy day to be the third one, given that the current state is sunny, is given by: ```{r fpTime2} firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ``` To compute the *mean* first passage times, i.e. the expected number of days before it rains given that today is sunny, we can use the `meanFirstPassageTime` function: ```{r mfpt1} meanFirstPassageTime(mcWeather) ``` indicating e.g. that the average number of days of sun or cloud before rain is 6.67 if we start counting from a sunny day, and 5 if we start from a cloudy day. Note that we can also specify one or more destination states: ```{r mfpt2} meanFirstPassageTime(mcWeather,"rain") ``` The implementation follows the matrix solutions by [@GrinsteadSnell]. 
We can check the result by averaging the first passage probability density function: ```{r mfpt3} firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ``` ## Mean recurrence time The `meanRecurrenceTime` method gives the first mean recurrence time (expected number of steps to go back to a state if it was the initial one) for each recurrent state in the transition probabilities matrix for a DTMC. Let's see an example: ```{r mrt-weather} meanRecurrenceTime(mcWeather) ``` Another example, with not all of its states being recurrent: ```{r mrt-probMc} recurrentStates(probMc) meanRecurrenceTime(probMc) ``` ## Absorption probabilities and mean absorption time We are going to use the Drunkard’s random walk from [@GrinsteadSnell]. We have a drunk person walking through the street. Each move the person does, if they have not arrived to either home (corner 1) or to the bar (corner 5) could be to the left corner or to the right one, with equal probability. In case of arrival to the bar or to home, the person stays there. ```{r data-drunkard} drunkProbs <- markovchain:::zeros(5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ``` Recurrent (in fact absorbing states) are: ```{r rs-drunkard} recurrentStates(drunkMc) ``` Transient states are the rest: ```{r ts-drunkard} transientStates(drunkMc) ``` The probability of either being absorbed by the bar or by the sofa at home are: ```{r ap-drunkard} absorptionProbabilities(drunkMc) ``` which means that the probability of arriving home / bar is inversely proportional to the distance to each one. 
But we also would like to know how much time does the person take to arrive there, which can be done with `meanAbsorptionTime`: ```{r at-drunkard} meanAbsorptionTime(drunkMc) ``` So it would take `3` steps to arrive to the destiny if the person is either in the second or fourth corner, and `4` steps in case of being at the same distance from home than to the bar. ## Committor probability The committor probability tells us the probability to reach a given state before another given. Suppose that we start in a cloudy day, the probabilities of experiencing a rainy day before a sunny one is 0.5: ```{r} committorAB(mcWeather,3,1) ``` ## Hitting probabilities Rewriting the system \eqref{eq:hitting-probs} as: \begin{equation*} A = \left(\begin{array}{c|c|c|c} A_1 & 0 & \ldots & 0 \\ \hline 0 & A_2 & \ldots & 0 \\ \hline \vdots & \vdots & \ddots & 0 \\ \hline 0 & 0 & \ldots & A_n \end{array}\right) \end{equation*} \begin{eqnarray*} A_1 &= \left(\begin{matrix} -1 & p_{1,2} & p_{1,3} & \ldots & p_{1,n} \\ 0 & (p_{2,2} - 1) & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & p_{n, 2} & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ A_2 &= \left(\begin{matrix} (p_{1,1} - 1) & 0 & p_{1,3} & \ldots & p_{1,n} \\ p_{2,1} & -1 & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & 0 & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ \vdots & \vdots\\ A_n &= \left(\begin{matrix} (p_{1,1} - 1) & p_{1,2} & p_{1,3} & \ldots & 0 \\ p_{2,1} & (p_{2,2} -1) & p_{2,3} & \ldots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & p_{n,2} & p_{n,3} & \ldots & -1 \end{matrix}\right)\\ \end{eqnarray*} \begin{equation*} \begin{array}{lr} X_j = \left(\begin{array}{c} h_{1,j} \\ h_{2,j} \\ \vdots \\ h_{n,j} \end{array}\right) & C_j = - \left(\begin{array}{c} p_{1,j} \\ p_{2,j} \\ \vdots \\ p_{n,j} \end{array}\right) \end{array} \end{equation*} we end up having to solve the block systems: \begin{equation} A_j 
\cdot X_j = C_j \end{equation} Let us imagine the $i$ -th state has transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then that same row would turn into $(0,0, \ldots, 0)$ for some block, thus obtaining a singular matrix. Another case which may give us problems could be: state $i$ has the following transition probabilities: $(0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0)$ and the state $j$ has the following transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then when building some blocks we will end up with rows: \begin{eqnarray*} (0, \ldots, 0, \underset{i)}{-1}, 0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0) \\ (0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0, \underset{j)}{-1}, 0, \ldots, 0) \end{eqnarray*} which are linearly dependent. Our hypothesis is that if we treat the closed communicating classes differently, we *might* delete the linearity in the system. If we have a closed communicating class $C_u$, then $h_{i,j} = 1$ for all $i,j \in C_u$ and $h_{k,j} = 0$ for all $k\not\in C_u$. Then we can set $X_u$ appropriately and solve the other $X_v$ using those values. The method in charge of that in `markovchain` package is `hittingProbabilities`, which receives a Markov chain and computes the matrix $(h_{ij})_{i,j = 1,\ldots, n}$ where $S = \{s_1, \ldots, s_n\}$ is the set of all states of the chain. For the following chain: ```{r hitting-data} M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ``` we want to compute the hitting probabilities. That can be done with: ```{r hitting-probabilities} hittingProbabilities(hittingTest) ``` In the case of the `mcWeather` Markov chain we would obtain a matrix with all its elements set to $1$. 
That makes sense (and is desirable) since if today is sunny, we expect it would be sunny again at certain point in the time, and the same with rainy weather (that way we assure good harvests): ```{r hitting-weather} hittingProbabilities(mcWeather) ``` # Statistical analysis {#sec:statistics} Table \@ref(tab:funs4Stats) lists the functions and methods implemented within the package which help to fit, simulate and predict DTMC. \begin{table}[h] \centering \begin{tabular}{lll} \hline Function & Purpose \\ \hline \hline \code{markovchainFit} & Function to return fitted Markov chain for a given sequence.\\ \code{predict} & Method to calculate predictions from \code{markovchain} or \\ & \code{markovchainList} objects.\\ \code{rmarkovchain} & Function to sample from \code{markovchain} or \code{markovchainList} objects.\\ \hline \end{tabular} \caption{The \pkg{markovchain} statistical functions.} \label{tab:funs4Stats} \end{table} ## Simulation Simulating a random sequence from an underlying DTMC is quite easy thanks to the function `rmarkovchain`. The following code generates a year of weather states according to `mcWeather` underlying stochastic process. ```{r simulatingAMarkovChain} weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ``` Similarly, it is possible to simulate one or more sequences from a semi-homogeneous Markov chain, as the following code (applied on CCHC example) exemplifies. ```{r simulatingAListOfMarkovChain} patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ``` Two advance parameters are available to the `rmarkovchain` method which helps you decide which implementation to use. There are four options available : \proglang{R}, \proglang{R} in parallel, \proglang{C++} and \proglang{C++} in parallel. Two boolean parameters `useRcpp` and `parallel` will decide which implementation will be used. Default is \code{useRcpp = TRUE} and \code{parallel = FALSE} i.e. 
\proglang{C++} implementation. The \proglang{C++} implementation is generally faster than the `R` implementation. If you have multicore processors then you can take advantage of `parallel` parameter by setting it to `TRUE`. When both `Rcpp=TRUE` and `parallel=TRUE` the parallelization has been carried out using \pkg{RcppParallel} package \citep{pkg:RcppParallel}. ## Estimation A time homogeneous Markov chain can be fit from given data. Four methods have been implemented within current version of \pkg{markovchain} package: maximum likelihood, maximum likelihood with Laplace smoothing, Bootstrap approach, maximum a posteriori. Equation \ref{eq:MLE} shows the maximum likelihood estimator (MLE) of the $p_{ij}$ entry, where the $n_{ij}$ element consists in the number of sequences $\left( X_{t}=s_{i}, X_{t+1}=s_{j}\right)$ found in the sample, that is \begin{equation} {\hat p^{MLE}}_{ij} = \frac{n_{ij}}{\sum\limits_{u = 1}^k {n_{iu}}}. \label{eq:MLE} \end{equation} Equation \@ref(eq:SE) shows the `standardError` of the MLE \citep{MSkuriat}. \begin{equation} SE_{ij} = \frac{ {\hat p^{MLE}}_{ij} }{\sqrt{n_{ij}}} \label{eq:SE} \end{equation} ```{r fitMcbyMLE2} weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ``` The Laplace smoothing approach is a variation of the MLE, where the $n_{ij}$ is substituted by $n_{ij}+\alpha$ (see Equation \ref{eq:LAPLACE}), being $\alpha$ an arbitrary positive stabilizing parameter. \begin{equation} {\hat p^{LS}}_{ij} = \frac{{{n_{ij}} + \alpha }}{{\sum\limits_{u = 1}^k {\left( {{n_{iu}} + \alpha } \right)} }} \label{eq:LAPLACE} \end{equation} ```{r fitMcbyLAPLACE} weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate ``` (NOTE: The Confidence Interval option is enabled by default. Remove this option to speed up computations.) 
Both MLE and Laplace approach are based on the `createSequenceMatrix` functions that returns the raw counts transition matrix. ```{r fitSequenceMatrix} createSequenceMatrix(stringchar = weathersOfDays) ``` `stringchar` could contain `NA` values, and the transitions containing `NA` would be ignored. An issue occurs when the sample contains only one realization of a state (say $X_{\beta}$) which is located at the end of the data sequence, since it yields to a row of zero (no sample to estimate the conditional distribution of the transition). In this case the estimated transition matrix is corrected assuming $p_{\beta,j}=1/k$, being $k$ the possible states. Create sequence matrix can also be used to obtain raw count transition matrices from a given $n*2$ matrix as the following example shows: ```{r fitSequenceMatrix2} myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ``` A bootstrap estimation approach has been developed within the package in order to provide an indication of the variability of ${\hat p}_{ij}$ estimates. The bootstrap approach implemented within the \pkg{markovchain} package follows these steps: 1. bootstrap the data sequences following the conditional distributions of states estimated from the original one. The default bootstrap samples is 10, as specified in `nboot` parameter of `markovchainFit` function. 2. apply MLE estimation on bootstrapped data sequences that are saved in `bootStrapSamples` slot of the returned list. 3. the ${p^{BOOTSTRAP}}_{ij}$ is the average of all ${p^{MLE}}_{ij}$ across the `bootStrapSamples` list, normalized by row. A `standardError` of $\hat{{p^{MLE}}_{ij}}$ estimate is provided as well. 
```{r fitMcbyBootStrap1} weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20) weatherFittedBOOT$estimate weatherFittedBOOT$standardError ``` The bootstrapping process can be done in parallel thanks to \pkg{RcppParallel} package \citep{pkg:RcppParallel}. Parallelized implementation is definitively suggested when the data sample size or the required number of bootstrap runs is high. ```{r fitMcbyBootStrap2, eval=FALSE} weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 200, parallel = TRUE) weatherFittedBOOTParallel$estimate weatherFittedBOOTParallel$standardError ``` The parallel bootstrapping uses all the available cores on a machine by default. However, it is also possible to tune the number of threads used. Note that this should be done in R before calling the `markovchainFit` function. For example, the following code will set the number of threads to 4. ```{r fitMcbyBootStrap3, eval=FALSE} RcppParallel::setNumThreads(2) ``` For more details, please refer to \pkg{RcppParallel} web site. For all the fitting methods, the `logLikelihood` \citep{MSkuriat} denoted in Equation \ref{eq:LLH} is provided. \begin{equation} LLH = \sum_{i,j} n_{ij} * log (p_{ij}) \label{eq:LLH} \end{equation} where $n_{ij}$ is the entry of the frequency matrix and $p_{ij}$ is the entry of the transition probability matrix. ```{r fitMcbyMLE1} weatherFittedMLE$logLikelihood weatherFittedBOOT$logLikelihood ``` Confidence matrices of estimated parameters (parametric for MLE, non - parametric for BootStrap) are available as well. The `confidenceInterval` is provided with the two matrices: `lowerEndpointMatrix` and `upperEndpointMatrix`. The confidence level (CL) is 0.95 by default and can be given as an argument of the function `markovchainFit`. This is used to obtain the standard score (z-score). 
From classical inference theory, if $ci$ is the level of confidence required assuming a normal distribution, the $zscore(ci)$ solves $\Phi\left(zscore(ci)\right) = 1-\left(\frac{1-ci}{2}\right)$. Equations \ref{eq:CIL} and \ref{eq:CIU} \citep{MSkuriat} show the `confidenceInterval` of a fitting. Note that each entry of the matrices is bounded between 0 and 1. \begin{align} LowerEndpoint_{ij} = p_{ij} - zscore (CL) * SE_{ij} \label{eq:CIL} \\ UpperEndpoint_{ij} = p_{ij} + zscore (CL) * SE_{ij} \label{eq:CIU} \end{align} ```{r confint} weatherFittedMLE$confidenceInterval weatherFittedBOOT$confidenceInterval ``` A special function, `multinomialConfidenceIntervals`, has been written in order to obtain multinomial wise confidence intervals. The code has been based on an Rcpp translation of the \pkg{MultinomialCI} package's functions \cite{pkg:MultinomialCI} that were themselves based on the \cite{sison1995simultaneous} paper. ```{r multinomial} multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix, countsTransitionMatrix = createSequenceMatrix(weathersOfDays)) ``` The functions for fitting DTMC have mostly been rewritten in \proglang{C++} using \pkg{Rcpp} \cite{RcppR} since version 0.2. It is also possible to fit a DTMC object from `matrix` or `data.frame` objects as shown in following code. ```{r fitMclists} data(holson) singleMc<-markovchainFit(data=holson[,2:12],name="holson") ``` The same applies for `markovchainList` (output length has been limited). ```{r fitMclistsFit1, output.lines=20} mcListFit<-markovchainListFit(data=holson[,2:6],name="holson") mcListFit$estimate ``` Finally, given a `list` object, it is possible to fit a `markovchain` object or to obtain the raw transition matrix. 
```{r fitMclistsFit2} c1<-c("a","b","a","a","c","c","a") c2<-c("b") c3<-c("c","a","a","c") c4<-c("b","a","b","a","a","c","b") c5<-c("a","a","c",NA) c6<-c("b","c","b","c","a") mylist<-list(c1,c2,c3,c4,c5,c6) mylistMc<-markovchainFit(data=mylist) mylistMc ``` The same works for `markovchainFitList`. ```{r fitAMarkovChainListfromAlist, output.lines=15} markovchainListFit(data=mylist) ``` If any transition contains `NA`, it will be ignored in the results as the above example showed. ## Prediction The $n$-step forward predictions can be obtained using the `predict` methods explicitly written for `markovchain` and `markovchainList` objects. The prediction is the mode of the conditional distribution of $X_{t+1}$ given $X_{t}=s_{j}$, being $s_{j}$ the last realization of the DTMC (homogeneous or semi-homogeneous). ### Predicting from a markovchain object The 3-days forward predictions from `markovchain` object can be generated as follows, assuming that the last two days were respectively "cloudy" and "sunny". ```{r markovchainPredict} predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3) ``` ### Predicting from a markovchainList object Given an initial two years health status, the 5-year ahead prediction of any CCRC guest is ```{r markovchainListPredict} predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5) ``` The prediction has stopped at time sequence since the underlying semi-homogeneous Markov chain has a length of four. In order to continue five years ahead, the `continue=TRUE` parameter setting makes the `predict` method keeping to use the last `markovchain` in the sequence list. 
```{r markovchainListPredict2} predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE) ``` ## Statistical Tests In this section, we describe the statistical tests: assessing the Markov property (`verifyMarkovProperty`), the order (`assessOrder`), the stationarity (`assessStationarity`) of a Markov chain sequence, and the divergence test for empirically estimated transition matrices (`divergenceTest`). Most of such tests are based on the $\chi ^2$ statistics. Relevant references are \cite{kullback1962tests} and \cite{anderson1957statistical}. All such tests have been designed for small samples, since it is easy to detect departures from Markov property as long as the sample size increases. In addition, the accuracy of the statistical inference functions has been questioned and will be thoroughly investigated in future versions of the package. ### Assessing the Markov property of a Markov chain sequence The `verifyMarkovProperty` function verifies whether the Markov property holds for the given chain. The test implemented in the package looks at triplets of successive observations. If $x_1, x_2, \ldots, x_N$ is a set of observations and $n_{ijk}$ is the number of times $t$ $\left(1 \le t \le N-2 \right)$ such that $x_t=i, x_{t+1}=j, x_{t+2}=k$, then if the Markov property holds $n_{ijk}$ follows a Binomial distribution with parameters $n_{ij}$ and $p_{jk}$. A classical $\chi^2$ test can check this distributional assumption, since $\sum_{i}\sum_{j}\sum_{k}\frac{(n_{ijk}-n_{ij}\hat{p_{jk}})^2}{n_{ij}\hat{p_{jk}}}\sim \chi^2\left(q \right )$ where $q$ is the number of degrees of freedom. 
The number of degrees of freedom $q$ of the distribution of $\chi^2$ is given by the formula $r-q+s-1$, where: $s$ denotes the number of states $i$ in the state space such that $n_{i} > 0$; $q$ denotes the number of pairs $(i, j)$ for which $n_{ij} > 0$; and $r$ denotes the number of triplets $(i, j, k)$ for which $n_{ij}n_{jk} > 0$. ```{r test1} sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") verifyMarkovProperty(sample_sequence) ``` ### Assessing the order of a Markov chain sequence The `assessOrder` function checks whether the given chain is of first order or of second order. For each possible present state, we construct a contingency table of the frequency of the future state for each past to present state transition as shown in Table \ref{tab:order}. \begin{table}[h] \centering \begin{tabular}{l | l | l | l} \hline past & present & future & future \\ & & a & b \\ \hline \hline a & a & 2 & 2\\ b & a & 2 & 2\\ \hline \end{tabular} \caption{Contingency table to assess the order for the present state a.} \label{tab:order} \end{table} Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is of first order. ```{r test2} data(rain) assessOrder(rain$rain) ``` ### Assessing the stationarity of a Markov chain sequence The `assessStationarity` function assesses if the transition probabilities of the given chain change over time. To be more specific, the chain is stationary if the following condition is met. \begin{equation} p_{ij}(t) = p_{ij} ~\textrm{ for all }~t \label{eq:stationarity} \end{equation} For each possible state, we construct a contingency table of the estimated transition probabilities over time as shown in Table \ref{tab:stationarity}. 
\begin{table}[h] \centering \begin{tabular}{l | l | l} \hline time (t) & probability of transition to a & probability of transition to b \\ \hline \hline 1 & 0 & 1\\ 2 & 0 & 1\\ . & . & . \\ . & . & . \\ . & . & . \\ 16 & 0.44 & 0.56\\ \hline \end{tabular} \caption{Contingency table to assess the stationarity of the state a.} \label{tab:stationarity} \end{table} Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is stationary. ```{r test3} assessStationarity(rain$rain, 10) ``` ### Divergence tests for empirically estimated transition matrices This section discusses tests developed to verify whether: 1. An empirical transition matrix is consistent with a theoretical one. 2. Two or more empirical transition matrices belongs to the same DTMC. The first test is implemented by the `verifyEmpiricalToTheoretical` function. Being $f_{ij}$ the raw transition count, \cite{kullback1962tests} shows that $2*\sum_{i=1}^{r}\sum_{j=1}^{r}f_{ij}\ln\frac{f_{ij}}{f_{i.}P\left( E_j | E_i\right)} \sim \chi^2\left ( r*(r-1) \right )$. The following example is taken from \cite{kullback1962tests}: ```{r divergence1} sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ``` The second one is implemented by the `verifyHomogeneity` function, inspired by \cite[section~9]{kullback1962tests}. 
Assuming that $i=1,2, \ldots, s$ DTMC samples are available and that the cardinality of the state space is $r$ it verifies whether the $s$ chains belong to the same unknown one. \cite{kullback1962tests} shows that its test statistic follows a chi-square law, $2*\sum_{i=1}^{s}\sum_{j=1}^{r}\sum_{k=1}^{r}f_{ijk}\ln\frac{n*f_{ijk}}{f_{i..}f_{.jk}} \sim \chi^2\left ( r*(r-1) \right )$. Also the following example is taken from \cite{kullback1962tests}: ```{r divergence2} data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ``` ## Continuous Time Markov Chains ### Intro The \pkg{markovchain} package provides functionality for continuous time Markov chains (CTMCs). CTMCs are a generalization of discrete time Markov chains (DTMCs) in that we allow time to be continuous. We assume a finite state space $S$ (an infinite state space wouldn't fit in memory). We can think of CTMCs as Markov chains in which state transitions can happen at any time. More formally, we would like our CTMCs to satisfy the following two properties: * The Markov property - let $F_{X(s)}$ denote the information about $X$ up to time $s$. Let $j \in S$ and $s \leq t$. Then, $P(X(t) = j|F_{X(s)}) = P(X(t) = j|X(s))$. * Time homogeneity - $P(X(t) = j|X(s) = k) = P(X(t-s) = j|X(0) = k)$. If both the above properties are satisfied, it is referred to as a time-homogeneous CTMC. If a transition occurs at time $t$, then $X(t)$ denotes the new state and $X(t)\neq X(t-)$. Now, let $X(0)=x$ and let $T_x$ be the time a transition occurs from this state. We are interested in the distribution of $T_x$. For $s,t \geq 0$, it can be shown that $ P(T_x > s+t | T_x > s) = P(T_x > t) $ This is the memoryless property that only the exponential random variable exhibits. Therefore, this is the sought distribution, and each state $s \in S$ has an exponential holding parameter $\lambda(s)$. 
Since $\mathrm{E}T_x = \frac{1}{\lambda(x)}$, higher the rate $\lambda(x)$, smaller the expected time of transitioning out of the state $x$. However, specifying this parameter alone for each state would only paint an incomplete picture of our CTMC. To see why, consider a state $x$ that may transition to either state $y$ or $z$. The holding parameter enables us to predict when a transition may occur if we start off in state $x$, but tells us nothing about which state will be next. To this end, we also need transition probabilities associated with the process, defined as follows (for $y \neq x$) - $p_{xy} = P(X(T_s) = y | X(0) = x)$. Note that $\sum_{y \neq x} p_{xy} = 1$. Let $Q$ denote this transition matrix ($Q_{ij} = p_{ij}$). What is key here is that $T_x$ and the state $y$ are independent random variables. Let's define $\lambda(x, y) = \lambda(x) p_{xy}$ We now look at Kolmogorov's backward equation. Let's define $P_{ij}(t) = P(X(t) = j | X(0) = i)$ for $i, j \in S$. The backward equation is given by (it can be proved) $P_{ij}(t) = \delta_{ij}e^{-\lambda(i)t} + \int_{0}^{t}\lambda(i)e^{-\lambda(i)t} \sum_{k \neq i} Q_{ik} P_{kj}(t-s) ds$. Basically, the first term is non-zero if and only if $i=j$ and represents the probability that the first transition from state $i$ occurs after time $t$. This would mean that at $t$, the state is still $i$. The second term accounts for any transitions that may occur before time $t$ and denotes the probability that at time $t$, when the smoke clears, we are in state $j$. This equation can be represented compactly as follows $P'(t) = AP(t)$ where $A$ is the *generator* matrix. \[ A(i, j) = \begin{cases} \lambda(i, j) & \mbox{if } i \neq j \\ -\lambda(i) & \mbox{else.} \end{cases} \] Observe that the sum of each row is 0. A CTMC can be completely specified by the generator matrix. ### Stationary Distributions The following theorem guarantees the existence of a unique stationary distribution for CTMCs. 
Note that $X(t)$ being irreducible and recurrent is the same as $X_n(t)$ being irreducible and recurrent. Suppose that $X(t)$ is irreducible and recurrent. Then $X(t)$ has an invariant measure $\eta$, which is unique up to multiplicative factors. Moreover, for each $k \in S$, we have \[\eta_k = \frac{\pi_k}{\lambda(k)}\] where $\pi$ is the unique invariant measure of the embedded discrete time Markov chain $Xn$. Finally, $\eta$ satisfies \[0 < \eta_j < \infty, \forall j \in S\] and if $\sum_i \eta_i < \infty$ then $\eta$ can be normalized to get a stationary distribution. ### Estimation Let the data set be $D = \{(s_0, t_0), (s_1, t_1), ..., (s_{N-1}, t_{N-1})\}$ where $N=|D|$. Each $s_i$ is a state from the state space $S$ and during the time $[t_i,t_{i+1}]$ the chain is in state $s_i$. Let the parameters be represented by $\theta = \{\lambda, P\}$ where $\lambda$ is the vector of holding parameters for each state and $P$ the transition matrix of the embedded discrete time Markov chain. Then the probability is given by \[ {Pr(D | \theta) \propto \lambda(s_0)e^{-\lambda(s_0)(t_1-t_0)}Pr(s_1|s_0) \cdot\ldots\cdot \lambda(s_{N-2})e^{-\lambda(s_{N-2})(t_{N-1}-t_{N-2})}Pr(s_{N-1}|s_{N-2})} \] Let $n(j|i)$ denote the number of $i$->$j$ transitions in $D$, and $n(i)$ the number of times $s_i$ occurs in $D$. Let $t(s_i)$ denote the total time the chain spends in state $s_i$. Then the MLEs are given by \[ \hat{\lambda(s)} = \frac{n(s)}{t(s)},\hat{Pr(j|i)}=\frac{n(j|i)}{n(i)} \] ### Expected Hitting Time The package provides a function `ExpectedTime` to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in S$, where $S$ is the set of all states and holding time $q_{i} > 0$ for every $i \neq j$. 
Assuming the conditions to be true, the expected hitting time is equal to the minimal non-negative solution vector $p$ to the system of linear equations: \begin{equation} \begin{cases} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{l} = 1 & k \neq j \end{cases} \label{eq:EHT} \end{equation} ### Probability at time t The package provides a function `probabilityatT` to calculate probability of every state according to given `ctmc` object. Here we use Kolmogorov's backward equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. Here $P(t)$ is the transition function at time t. The value $P(t)[i][j]$ describes the probability of the state at time $t$ to be equal to $j$ if it was equal to $i$ at time $t=0$. It takes care of the case when `ctmc` object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix $P(t)$. ### Examples To create a CTMC object, you need to provide a valid generator matrix, say $Q$. The CTMC object has the following slots - states, generator, by row, name (look at the documentation object for further details). Consider the following example in which we aim to model the transition of a molecule from the $\sigma$ state to the $\sigma^*$ state. When in the former state, if it absorbs sufficient energy, it can make the jump to the latter state and remains there for some time before transitioning back to the original state. Let us model this by a CTMC: ```{r rCtmcInit} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` To generate random CTMC transitions, we provide an initial distribution of the states. This must be in the same order as the dimnames of the generator. The output can be returned either as a list or a data frame. 
```{r rctmcRandom0} statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ``` $n$ represents the number of samples to generate. There is an optional argument $T$ for `rctmc`. It represents the time of termination of the simulation. To use this feature, set $n$ to a very high value, say `Inf` (since we do not know the number of transitions before hand) and set $T$ accordingly. ```{r ctmcRandom1} statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2) ``` To obtain the stationary distribution simply invoke the `steadyStates` function ```{r rctmcSteadyStates} steadyStates(molecularCTMC) ``` For fitting, use the `ctmcFit` function. It returns the MLE values for the parameters along with the confidence intervals. ```{r rctmcFitting} data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) ``` One approach to obtain the generator matrix is to apply the `logm` function from the \pkg{expm} package on a transition matrix. Numeric issues arise, see \cite{israel2001finding}. For example, applying the standard `method` ('Higham08') on `mcWeather` raises an error, whilst the alternative method (eigenvalue decomposition) is OK. The following code estimates the generator matrix of the `mcWeather` transition matrix. ```{r mcWeatherQ} mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen') mcWeatherQ ``` Therefore, the "half - day" transition probability for mcWeather DTMC is ```{r mcWeatherHalfDay} mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5) mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix") mcWeatherHalfDay ``` The \pkg{ctmcd} package \citep{pkg:ctmcd} provides various functions to estimate the generator matrix (GM) of a CTMC process using different methods. 
The following code provides a way to join \pkg{markovchain} and \pkg{ctmcd} computations. ```{r ctmcd1} if(requireNamespace(package='ctmcd', quietly = TRUE)) { require(ctmcd) require(expm) #defines a function to transform a GM into a TM gm_to_markovchain<-function(object, t=1) { if(!(class(object) %in% c("gm","matrix","Matrix"))) stop("Error! Expecting either a matrix or a gm object") if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]]) #must add importClassesFrom("markovchain",markovchain) in the NAMESPACE #must add importFrom(expm, "expm") transitionMatrix<-expm(generator_matrix*t) out<-as(transitionMatrix,"markovchain") return(out) } #loading ctmcd dataset data(tm_abs) gm0=matrix(1,8,8) #initializing diag(gm0)=0 diag(gm0)=-rowSums(gm0) gm0[8,]=0 gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) #estimating GM mc_at_2=gm_to_markovchain(object=gmem, t=2) #converting to TM at time 2 } else { warning('package ctmcd unavailable') } ``` ## Pseudo - Bayesian Estimation \cite{Hu2002} shows an empirical quasi-Bayesian method to estimate transition matrices, given an empirical $\hat{P}$ transition matrix (estimated using the classical approach) and an a - priori estimate $Q$. In particular, each row of the matrix is estimated using the linear combination $\alpha \cdot Q+\left(1-1alpha\right) \cdot P$, where $\alpha$ is defined for each row as Equation \ref{eq:pseudobayes} shows \begin{equation} \left\{\begin{matrix} \hat{\alpha_i}=\frac{\hat{K_i}}{v\left(i \right )+\hat{K_i}}\\ \hat{K_i}=\frac{v\left(i \right)^2 - \sum_{j}Y_{ij}^2}{\sum_{j}(Y_{ij}-v\left(i \right)*q_{ij})^2} \end{matrix}\right. 
\label{eq:pseudobayes} \end{equation} The following code returns the pseudo Bayesian estimate of the transition matrix: ```{r pseudobayes} pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ``` We then apply it to the weather example: ```{r pseudobayes2} trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ``` ## Bayesian Estimation The \pkg{markovchain} package provides functionality for maximum a posteriori (MAP) estimation of the chain parameters (at the time of writing this document, only first order models are supported) by Bayesian inference. 
It also computes the probability of observing a new data set, given a (different) data set. This vignette provides the mathematical description for the methods employed by the package. ### Notation and set-up The data is denoted by $D$, the model parameters (transition matrix) by $\theta$. The object of interest is $P(\theta | D)$ (posterior density). $\mathcal{A}$ represents an alphabet class, each of whose members represent a state of the chain. Therefore \[D = s_0 s_1 ... s_{N-1}, s_t \in \mathcal{A}\] where $N$ is the length of the data set. Also, \[\theta = \{p(s|u), s \in \mathcal{A}, u \in \mathcal{A} \}\] where $\sum_{s \in \mathcal{A}} p(s|u) = 1$ for each $u \in \mathcal{A}$. Our objective is to find $\theta$ which maximizes the posterior. That is, if our solution is denoted by $\hat{\theta}$, then \[\hat{\theta} = \underset{\theta}{argmax}P(\theta | D)\] where the search space is the set of right stochastic matrices of dimension $|\mathcal{A}|x|\mathcal{A}|$. $n(u, s)$ denotes the number of times the word $us$ occurs in $D$ and $n(u)=\sum_{s \in \mathcal{A}}n(u, s)$. The hyper-parameters are similarly denoted by $\alpha(u, s)$ and $\alpha(u)$ respectively. ### Methods Given $D$, its likelihood conditioned on the observed initial state in D is given by \[P(D|\theta) = \prod_{s \in \mathcal{A}} \prod_{u \in \mathcal{A}} p(s|u)^{n(u, s)}\] Conjugate priors are used to model the prior $P(\theta)$. The reasons are two fold: 1. Exact expressions can be derived for the MAP estimates, expectations and even variances 2. Model order selection/comparison can be implemented easily (available in a future release of the package) The hyper-parameters determine the form of the prior distribution, which is a product of Dirichlet distributions \[P(\theta) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{\alpha(u, s)) - 1} \Big\}\] where $\Gamma(.)$ is the Gamma function. 
The hyper-parameters are specified using the `hyperparam` argument in the `markovchainFit` function. If this argument is not specified, then a default value of 1 is assigned to each hyper-parameter resulting in the prior distribution of each chain parameter to be uniform over $[0,1]$. Given the likelihood and the prior as described above, the evidence $P(D)$ is simply given by \[P(D) = \int P(D|\theta) P(\theta) d\theta\] which simplifies to \[ P(D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u))} \Big\} \] Using Bayes' theorem, the posterior now becomes (thanks to the choice of conjugate priors) \[ P(\theta | D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(n(u) + \alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{n(u, s) + \alpha(u, s)) - 1} \Big\} \] Since this is again a product of Dirichlet distributions, the marginal distribution of a particular parameter $P(s|u)$ of our chain is given by \[ P(s|u) \sim Beta(n(u, s) + \alpha(u, s), n(u) + \alpha(u) - n(u, s) - \alpha(u, s)) \] Thus, the MAP estimate $\hat{\theta}$ is given by \[ \hat{\theta} = \Big\{ \frac{n(u, s) + \alpha(u, s) - 1}{n(u) + \alpha(u) - |\mathcal{A}|}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \] The function also returns the expected value, given by \[ \text{E}_{\text{post}} p(s|u) = \Big\{ \frac{n(u, s) + \alpha(u, s)}{n(u) + \alpha(u)}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \] The variance is given by \[ \text{Var}_{\text{post}} p(s|u) = \frac{n(u, s) + \alpha(u, s)}{(n(u) + \alpha(u))^2} \frac{n(u) + \alpha(u) - n(u, s) - \alpha(u, s)}{n(u) + \alpha(u) + 1} \] The square root of this quantity is the standard error, which is returned by the function. The confidence intervals are constructed by computing the inverse of the beta integral. 
### Predictive distribution Given the old data set, the probability of observing new data is $P(D'|D)$ where $D'$ is the new data set. Let $m(u, s), m(u)$ denote the corresponding counts for the new data. Then, \[ P(D'|D) = \int P(D' | \theta) P(\theta | D) d\theta \] We already know the expressions for both quantities in the integral and it turns out to be similar to evaluating the evidence \[ P(D'|D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + m(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u) + m(u))} \Big\} \] ### Choosing the hyper-parameters The hyper parameters model the shape of the parameters' prior distribution. These must be provided by the user. The package offers functionality to translate a given prior belief transition matrix into the hyper-parameter matrix. It is assumed that this belief matrix corresponds to the mean value of the parameters. Since the relation \[ \text{E}_{\text{prior}} p(s | u) = \frac{\alpha(u, s)}{\alpha(u)} \] holds, the function accepts as input the belief matrix as well as a scaling vector (serves as a proxy for $\alpha(.)$) and proceeds to compute $\alpha(., .)$. Alternatively, the function accepts a data sample and infers the hyper-parameters from it. Since the mode of a parameter (with respect to the prior distribution) is proportional to one less than the corresponding hyper-parameter, we set \[ \alpha(u, s) - 1 = m(u, s) \] where $m(u, s)$ is the $u\rightarrow s$ transition count in the data sample. This is regarded as a 'fake count' which helps $\alpha(u, s)$ to reflect knowledge of the data sample. 
### Usage and examples ```{r loadAndDoExample} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ``` For the purpose of this section, we shall continue to use the weather of days example introduced in the main vignette of the package (reproduced above for convenience). Let us invoke the fit function to estimate the MAP parameters with 92\% confidence bounds and hyper-parameters as shown below, based on the first 200 days of the weather data. Additionally, let us find out what the probability is of observing the weather data for the next 165 days. The usage would be as follows ```{r MAPFit} hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ``` The results should not change after permuting the dimensions of the matrix. ```{r MAPFit2} hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ``` Note that the predictive probability is very small. However, this can be useful when comparing model orders. Suppose we have an idea of the (prior) transition matrix corresponding to the expected value of the parameters, and have a data set from which we want to deduce the MAP estimates. 
We can infer the hyper-parameters from this known transition matrix itself, and use this to obtain our MAP estimates. ```{r inferHyperparam} inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ``` Alternatively, we can use a data sample to infer the hyper-parameters. ```{r inferHyperparam2} inferHyperparam(data = weathersOfDays[1:15]) ``` In order to use the inferred hyper-parameter matrices, we do ```{r inferHyperparam3} hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ``` Now we can safely use `hyperMatrix3` and `hyperMatrix4` with `markovchainFit` (in the `hyperparam` argument). Supposing we don't provide any hyper-parameters, then the prior is uniform. This is the same as maximum likelihood. ```{r MAPandMLE} data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ``` # Applications {#sec:applications} This section shows applications of DTMC in various fields. ## Weather forecasting {#app:weather} Markov chains provide a simple model to predict the next day's weather given the current meteorological condition. The first application herewith shown is the "Land of Oz example" from \cite{landOfOz}, the second is the "Alofi Island Rainfall" from \cite{averyHenderson}. ### Land of Oz {#sec:wfLandOfOz} The Land of Oz is acknowledged not to have ideal weather conditions at all: the weather is snowy or rainy very often and, once more, there are never two nice days in a row. Consider three weather states: rainy, nice and snowy. 
Let the transition matrix be as in the following: ```{r weatPred1} mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ``` Given that today it is a nice day, the corresponding stochastic row vector is $w_{0}=(0\:,1\:,0)$ and the forecast after 1, 2 and 3 days are given by ```{r weatPred2} W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ``` As can be seen from $w_{1}$, if in the Land of Oz today is a nice day, tomorrow it will rain or snow with probability 1. One week later, the prediction can be computed as ```{r weatPred3} W7 <- W0 * (mcWP ^ 7) W7 ``` The steady state of the chain can be computed by means of the `steadyStates` method. ```{r weatPred4} q <- steadyStates(mcWP) q ``` Note that, from the seventh day on, the predicted probabilities are substantially equal to the steady state of the chain and they don't depend from the starting point, as the following code shows. ```{r weatPred5} R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ``` ### Alofi Island Rainfall {#sec:wfAlofi} Alofi Island daily rainfall data were recorded from January 1st, 1987 until December 31st, 1989 and classified into three states: "0" (no rain), "1-5" (from non zero until 5 mm) and "6+" (more than 5mm). The corresponding dataset is provided within the \pkg{markovchain} package. ```{r Alofi1} data("rain", package = "markovchain") table(rain$rain) ``` The underlying transition matrix is estimated as follows. ```{r Alofi2} mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ``` The long term daily rainfall distribution is obtained by means of the `steadyStates` method. ```{r Alofi3} steadyStates(mcAlofi) ``` ## Finance and Economics {#app:fin} Other relevant applications of DTMC can be found in Finance and Economics. 
### Finance {#fin:fin} Credit ratings transitions have been successfully modeled with discrete time Markov chains. Some rating agencies publish transition matrices that show the empirical transition probabilities across credit ratings. The example that follows comes from \pkg{CreditMetrics} \proglang{R} package \citep{CreditMetricsR}, carrying Standard \& Poor's published data. ```{r ratings1} rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ``` It is easy to convert such matrices into `markovchain` objects and to perform some analyses ```{r ratings2} creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ``` ### Economics {#fin:ec} For a recent application of \pkg{markovchain} in Economic, see \cite{manchesterR}. A dynamic system generates two kinds of economic effects \citep{bardPpt}: 1. those incurred when the system is in a specified state, and 2. those incurred when the system makes a transition from one state to another. Let the monetary amount of being in a particular state be represented as a m-dimensional column vector $c^{\rm{S}}$, while let the monetary amount of a transition be embodied in a $C^{R}$ matrix in which each component specifies the monetary amount of going from state i to state j in a single step. Henceforth, Equation \@ref(eq:cost) represents the monetary of being in state $i$. \begin{equation} {c_i} = c_i^{\rm{S}} + \sum\limits_{j = 1}^m {C_{ij}^{\rm{R}}} {p_{ij}}. 
\label{eq:cost} \end{equation} Let $\bar c = \left[ c_i \right]$ and let $e_i$ be the vector valued 1 in the initial state and 0 in all other, then, if $f_n$ is the random variable representing the economic return associated with the stochastic process at time $n$, Equation \@ref(eq:return) holds: \begin{equation} E\left[ {{f_n}\left( {{X_n}} \right)|{X_0} = i} \right] = {e_i}{P^n}\bar c. \label{eq:return} \end{equation} The following example assumes that a telephone company models the transition probabilities between customer/non-customer status by matrix $P$ and the cost associated to states by matrix $M$. ```{r economicAnalysis1} statesNames <- c("customer", "non customer") P <- markovchain:::zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05; rownames(P) <- statesNames; colnames(P) <- statesNames mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company") M <- markovchain:::zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0 ``` If the average revenue for existing customer is +100, the cost per state is computed as follows. ```{r economicAnalysis2} c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,] c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,] ``` For an existing customer, the expected gain (loss) at the fifth year is given by the following code. ```{r economicAnalysis3} as.numeric((c(1, 0)* mcP ^ 5) %*% (as.vector(c(c1, c2)))) ``` ## Actuarial science {#app:act} Markov chains are widely applied in the field of actuarial science. Two classical applications are policyholders' distribution across Bonus Malus classes in Motor Third Party Liability (MTPL) insurance (Section \@ref(sec:bm)) and health insurance pricing and reserving (Section \@ref(sec:hi)). ### MPTL Bonus Malus {#sec:bm} Bonus Malus (BM) contracts grant the policyholder a discount (enworsen) as a function of the number of claims in the experience period. 
The discount (enworsen) is applied on a premium that already allows for known (a priori) policyholder characteristics \citep{denuit2007actuarial} and it usually depends on vehicle, territory, the demographic profile of the policyholder, and policy coverage deep (deductible and policy limits).\\ Since the proposed BM level depends on the claim on the previous period, it can be modeled by a discrete Markov chain. A very simplified example follows. Assume a BM scale from 1 to 5, where 4 is the starting level. The evolution rules are shown in Equation \ref{eq:BM}: \begin{equation} bm_{t + 1} = \max \left( {1,bm_{t} - 1} \right)*\left( {\tilde N = 0} \right) + \min \left( {5,bm_{t} + 2*\tilde N} \right)*\left( {\tilde N \ge 1} \right). \label{eq:BM} \end{equation} The number of claim $\tilde N$ is a random variable that is assumed to be Poisson distributed. ```{r bonusMalus1} getBonusMalusMarkovChain <- function(lambda) { bmMatr <- markovchain:::zeros(5) bmMatr[1, 1] <- dpois(x = 0, lambda) bmMatr[1, 3] <- dpois(x = 1, lambda) bmMatr[1, 5] <- 1 - ppois(q = 1, lambda) bmMatr[2, 1] <- dpois(x = 0, lambda) bmMatr[2, 4] <- dpois(x = 1, lambda) bmMatr[2, 5] <- 1 - ppois(q = 1, lambda) bmMatr[3, 2] <- dpois(x = 0, lambda) bmMatr[3, 5] <- 1 - dpois(x=0, lambda) bmMatr[4, 3] <- dpois(x = 0, lambda) bmMatr[4, 5] <- 1 - dpois(x = 0, lambda) bmMatr[5, 4] <- dpois(x = 0, lambda) bmMatr[5, 5] <- 1 - dpois(x = 0, lambda) stateNames <- as.character(1:5) out <- new("markovchain", transitionMatrix = bmMatr, states = stateNames, name = "BM Matrix") return(out) } ``` Assuming that the a-priori claim frequency per car-year is 0.05 in the class (being the class the group of policyholders that share the same common characteristics), the underlying BM transition matrix and its underlying steady state are as follows. 
```{r bonusMalus2} bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc)) ``` If the underlying BM coefficients of the class are 0.5, 0.7, 0.9, 1.0, 1.25, this means that the average BM coefficient applied on the long run to the class is given by ```{r bonusMalus3} sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25)) ``` This means that the average premium paid by policyholders in the portfolio almost halves in the long run. ### Health insurance example {#sec:hi} Actuaries quantify the risk inherent in insurance contracts evaluating the premium of insurance contract to be sold (therefore covering future risk) and evaluating the actuarial reserves of existing portfolios (the liabilities in terms of benefits or claims payments due to policyholder arising from previously sold contracts), see \cite{deshmukh2012multiple} for details. An applied example can be performed using the data from \cite{de2016assicurazioni} that has been saved in the `exdata` folder. ```{r healthIns6} ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo) ``` The data shows the probability of transition between the state of (A)ctive, to (I)ll and Dead. It is easy to complete the transition matrix. ```{r healthIns7} ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0) ``` Now we build a function that returns the transition during the $t+1$ th year, assuming that the subject has attained year $t$. 
```{r healthIns8} possibleStates<-c("A","I","D") getMc4Age<-function(age) { transitionsAtAge<-ltcDemo[ltcDemo$age==age,] myTransMatr<-matrix(0, nrow=3,ncol = 3, dimnames = list(possibleStates, possibleStates)) myTransMatr[1,1]<-transitionsAtAge$pAA[1] myTransMatr[1,2]<-transitionsAtAge$pAI[1] myTransMatr[1,3]<-transitionsAtAge$pAD[1] myTransMatr[2,2]<-transitionsAtAge$pII[1] myTransMatr[2,3]<-transitionsAtAge$pID[1] myTransMatr[3,3]<-1 myMc<-new("markovchain", transitionMatrix = myTransMatr, states = possibleStates, name = paste("Age",age,"transition matrix")) return(myMc) } ``` Cause transitions are not homogeneous across ages, we use a `markovchainList` object to describe the transition probabilities for a guy starting at age 100. ```{r healthIns8-prob} getFullTransitionTable<-function(age){ ageSequence<-seq(from=age, to=120) k=1 myList=list() for ( i in ageSequence) { mc_age_i<-getMc4Age(age = i) myList[[k]]<-mc_age_i k=k+1 } myMarkovChainList<-new("markovchainList", markovchains = myList, name = paste("TransitionsSinceAge", age, sep = "")) return(myMarkovChainList) } transitionsSince100<-getFullTransitionTable(age=100) ``` We can use such transition for simulating ten life trajectories for a guy that begins "active" (A) aged 100: ```{r healthIns9} rmarkovchain(n = 10, object = transitionsSince100, what = "matrix", t0 = "A", include.t0 = TRUE) ``` Lets consider 1000 simulated live trajectories, for a healthy guy aged 80. We can compute the expected time a guy will be disabled starting active at age 80. 
```{r healthIns10} transitionsSince80<-getFullTransitionTable(age=80) lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80, what="matrix",t0="A",include.t0=TRUE) temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories)) temp[lifeTrajectories=="I"]<-1 expected_period_disabled<-mean(rowSums((temp))) expected_period_disabled ``` Assuming that the health insurance will pay a benefit of 12000 per year disabled and that the real interest rate is 0.02, we can compute the lump sum premium at 80. ```{r healthIns11} mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1))))) ``` ## Sociology {#app:sociology} Markov chains have been actively used to model progressions and regressions between social classes. The first study was performed by \cite{glassHall}, while a more recent application can be found in \cite{blandenEtAlii}. The table that follows shows the income quartile of the father when the son was 16 (in 1984) and the income quartile of the son when aged 30 (in 2000) for the 1970 cohort. ```{r blandenEtAlii} data("blanden") mobilityMc <- as(blanden, "markovchain") mobilityMc ``` The underlying transition graph is plotted in Figure \@ref(fig:mobility). ```{r mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."} plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2, layout = layout.fruchterman.reingold) ``` The steady state distribution is computed as follows. Since transition across quartiles are shown, the probability function is evenly 0.25. ```{r blandenEtAlii3} round(steadyStates(mobilityMc), 2) ``` ## Genetics and Medicine {#sec:gen} This section contains two examples: the first shows the use of Markov chain models in genetics, the second shows an application of Markov chains in modelling diseases' dynamics. ### Genetics {#sec:genetics} \cite{averyHenderson} discusses the use of Markov chains in model Preprogucacon gene protein bases sequence. 
The corresponding sequence data is provided by the `preproglucacon` dataset in the \pkg{markovchain} package.
# Acknowledgments {#sec:aknowledgements} The package was selected for Google Summer of Code 2015 support. The authors wish to thank Michael Cole, Tobi Gutman and Mildenberger Thoralf for their suggestions and bug checks. A final thanks also to Dr. Simona C. Minotti and Dr. Mirko Signorelli for their support in drafting this version of the vignettes. \clearpage # References # markovchain/data/0000755000176200001440000000000015137702633013474 5ustar liggesusersmarkovchain/data/rain.rda0000644000176200001440000000202415137702633015113 0ustar liggesusers[jA,>Og&Fcn.U_/術!yqzu*e*Cez4.?rT7R5,-gnLx8F]-o`еt@q:Q9#3YtLoj}%=59z{t.'ɥ$QϹSp>|bFv䏖|4mHWIfM=Zws\ԮCrE{6҇MHSp%t>$g3/ń=j- Y ;,JK0ug R.j=: )fRO!)>1͒g49zF9N>)ėRyqePIpxqcK7TKZ#sEUo(4.[n宆79Y?Q{[zoĕ5L; /K>D-$9oK~Igʧ ~Ezr\a{Q"Lk6OIޡo_f7,nV-^IC(e Pɍ*?ʱ괌Ty\80}GXI+Wr-4+geLzwҀ-uq=UT]du^"+_z"zk j^C:q\8p]̊_ fdhb%gh@m0!Ц^d*& ݮ{Ϯ7eep{ɖgm'忖ww曽է|e_>ٌ{;;markovchain/data/tm_abs.rda0000644000176200001440000000036515137702633015435 0ustar liggesusers r0b```b`f@& `d`al%I@)> ! 
DP Z*.|~pBO PA0{@+ { t pW J' suO~87Fׁ:S2s/(10p@0T`be2Y0Lb3a (markovchain/data/preproglucacon.rda0000644000176200001440000000336515137702633017216 0ustar liggesusers\MoU]b)9$BZ&p q䀸C#\ǃUo|XU]ūeYx<<;ۛ_q}u}vY_ݵ^.w=>oׇroնToVk-q_?cu,6J#Cjv׾ݭP׳>dG7Ǻ<;΁mVl;5/[C}ʧ,0k͋ySm|Ŋb}Fe11Y6W27W:oSļQj.,,F,';|Ɋ?:,`)Łl./M]'ywfbm~Ęu9kQl90 0X:ilіV'm.gU^c Y ?k1<˝ڎTqMXwxrlg,.]|+?(1nz|\|q؋;?9','k5k?c\5ø(4b9yщTǴOgW/Pt<8!s?bý<,k-Ա+>YmAVsTtlǍ) r rؖ}X_2"vڀJ$(^Ú[[]F,5yo"&t|Q׿ 1Ű7}|8}ŸcldbՌ?Yέ˸v,cv6uX;ڄ)ӋȽ ]mgwϐ06l &#;[{ge8\n?n wGm^be?Km+\m)Ƌ bȫb@6= "bvg9st'b:IZ/]8JD({Dau9y$+䆃(pq֩Mb.=~"(#d  s4蔿g bɢ{tǩ2`#y $^I] 8iGy,lDs:A;N=6WڜSHGKvx58:՝Gdž"zᜂuJHculvJô)뢱m$썎p#M1T/7rNe.Yùm~bN~;-Б|ηB;3EƁc3/Ǝrp6sGQީ#l+/T;J4{Zy,⨹݄#"7Um" 9g\t0FreNYEq(Jchmюlvl6sɺQ=Jq|D£"mvNE#ޘE#Jy㵈ngl+"`0.8n"JZG-; s|8'//d["U)Jg E{H0 YZmarkovchain/data/sales.rda0000644000176200001440000000135515137702633015277 0ustar liggesusersZJA<_&<Ѓ^qI@mifggYw7qkmFA:q< ڸu˧%!+#dfi0_ĞT oTet,@\T 𒄢*ҭ~g#U%%{dN2$^A.G<7Iv8%i`eTl_`N^x&G!@>H«u^Mnd%`FC `Ӏ'Di`iF*J`J*=AU&^J(H ;#npt!):.DU. b8'M J%E*2IT? R)Ñgx-b%UK+M |&x+MtyKU ߽56h]%)|%+*)-OO+ssV۔ny-~;޺p7F]:/markovchain/data/holson.rda0000644000176200001440000000566015137702633015475 0ustar liggesusers7zXZi"6!Xx q])TW"nRʟѫ3^9{/r8A D1/ƌjpݑHčuڶbxz{CN0zs#YeNB[ƅcdTʄqVpmS)T_ |bB0'$YW)}_+@'8cK2f#4*)Dvpnt=@Џ28WH3.9ς>Scsk!(Jv,y~9󣎘BCR7?Cf$qDA՝$ԙ{%YˍrYb墘#`Z qY"&a̚@,:$\_p/D8_@6i bP7pB%zj5k(w%{Iy N6,1nֽ:6,&o6~;/x[k7X:CIpLR@NSX`~n&(#:+*$m#:qaQWkO,ɶpH֔H.L|mfV`j"%!1v^$m\ N1rbn59J25[? 
JHhbz!NOp@إ@ >x'1IF]O6E򳶚C_^&IW&F?A9jWXX>j{SlOe6xZ:Es]^afΟ .;o3c( ѷ_lI9FO i`Xd(Rx4VT1)LS Db}I`,A)yyܫd-å%d1hj!z* c%XeYSU3$_um3+h 3&0͎/J VЮUߦC";1?g;]# ;0^Hʼn7YOK~eT*{gI3<@Iъu[Y~̾Q\.!bRi!9ex:=#6(066.O=؇7 a޶ldTUO<٧iO F <-nVǬXP|2rۋD㇅{NAEQ rPG.%Vhpza0 Sv"ܦ, r49SwRTwBD5J+K$WA__;cK !Y % &1U3cvqcGkwj肬l#Ja3߆Cܙ݇ 0Ci 6n1* m?qqD*rKt$y4_z D!d2DvT.nX:>5fH۫byP]kf!Q5ޫDV}?u36~yohM'8:cE}a'8!8^&⻐P4Mi _$">B+0{_wO,xHE +F OΐwY}n,Y73TcOVv,2<@tބ8mwaZUO4*Jjzŗ<󝑌1d_6d0gRNRMb"3Ё$=ó0pMe!!|:BSĺϔRKe``c<`:_=]noz2TC8gRן3/}GjHipey؝_d %,VdJSÌ rYY\%s@%CN mt4$;ў x`X!x1S ݣدkNEG`vu`[Et-ȎFM:[?BM`϶Hĭ&T|UZ\(Iɝ &Yg{k4)iK5aS ra8>0 YZmarkovchain/data/craigsendi.rda0000644000176200001440000000025515137702633016276 0ustar liggesusers r0b```b`b&f H020piԼLf>Ck8$;@hcP-;TBĜ xr!E PI^bnj1- U'Vg15YM tM`sSRU͚X\ 5.X dai4Nmarkovchain/data/kullback.rda0000644000176200001440000000040315137702633015751 0ustar liggesusersBZh91AY&SY 3qIP@@?/@@QM=C&e @c$JChD`4`F14R; L``!3  !aJVZ3bqgؠSJz-N22 t@ #include #include using namespace Rcpp; using namespace std; template T sortByDimNames(const T mat){ CharacterVector colNames = colnames(mat); CharacterVector rowNames = rownames(mat); int n = colNames.size(); CharacterVector sortedNames(n); copy(rowNames.begin(), rowNames.end(), sortedNames.begin()); sortedNames.sort(); NumericVector colIdx(n); NumericVector rowIdx(n); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (colNames(j) == sortedNames(i)) colIdx(i) = j; if (rowNames(j) == sortedNames(i)) rowIdx(i) = j; } } T sortedMatrix(n); sortedMatrix.attr("dimnames") = List::create(sortedNames, sortedNames); for (int i = 0; i < n; i++) for (int j = 0; j < n; j++) sortedMatrix(i, j) = mat(rowIdx(i), colIdx(j)); return sortedMatrix; } double lbeta(double p, double q){ return lgamma(p) + lgamma(q) - lgamma(p + q); } template T transposeMatrix(T & mat) { int numRows = mat.nrow(); int numCols = mat.ncol(); T transpose(numCols, numRows); // Assign dim names transposed (rows to cols and viceversa) transpose.attr("dimnames") = 
List::create(colnames(mat), rownames(mat)); for (int i = 0; i < numCols; ++i) { transpose(i, _) = mat(_, i); } return transpose; } /* Purpose: BETAIN computes the incomplete Beta function ratio. Licensing: This code is distributed under the GNU LGPL license. Modified: 25 September 2014 Author: Original FORTRAN77 version by KL Majumder, GP Bhattacharjee. C++ version by John Burkardt. Reference: KL Majumder, GP Bhattacharjee, Algorithm AS 63: The incomplete Beta Integral, Applied Statistics, Volume 22, Number 3, 1973, pages 409-411. Parameters: Input, double X, the argument, between 0 and 1. Input, double P, Q, the parameters, which must be positive. Input, double BETA, the logarithm of the complete beta function. Output, int &IFAULT, error flag. 0, no error. nonzero, an error occurred. Output, double BETAIN, the value of the incomplete Beta function ratio. */ double betain(double x, double p, double q, double beta) { double acu = 0.1E-14; double ai; double cx; bool indx; int ns; double pp; double psq; double qq; double rx; double temp; double term; double value; double xx; value = x; // Special cases. if (x == 0.0 || x == 1.0) { return value; } // Change tail if necessary and determine S. psq = p + q; cx = 1.0 - x; if (p < psq * x){ xx = cx; cx = x; pp = q; qq = p; indx = true; } else { xx = x; pp = p; qq = q; indx = false; } term = 1.0; ai = 1.0; value = 1.0; ns = (int) (qq + cx * psq); // Use the Soper reduction formula. rx = xx / cx; temp = qq - ai; if (ns == 0) { rx = xx; } bool loop = true; while (loop) { term = term * temp * rx / (pp + ai); value = value + term;; temp = fabs(term); if (temp <= acu && temp <= acu * value) { value = value * exp (pp * log(xx) + (qq - 1.0) * log(cx) - beta) / pp; if (indx) value = 1.0 - value; loop = false; } ai = ai + 1.0; ns = ns - 1; if (0 <= ns) { temp = qq - ai; if (ns == 0) rx = xx; } else { temp = psq; psq = psq + 1.0; } } return value; } /* Purpose: XINBTA computes inverse of the incomplete Beta function. 
Discussion: The accuracy exponent SAE was loosened from -37 to -30, because the code would not otherwise accept the results of an iteration with p = 0.3, q = 3.0, alpha = 0.2. Licensing: This code is distributed under the GNU LGPL license. Modified: 25 September 2014 Author: Original FORTRAN77 version by GW Cran, KJ Martin, GE Thomas. C++ version by John Burkardt. Reference: GW Cran, KJ Martin, GE Thomas, Remark AS R19 and Algorithm AS 109: A Remark on Algorithms AS 63: The Incomplete Beta Integral and AS 64: Inverse of the Incomplete Beta Integeral, Applied Statistics, Volume 26, Number 1, 1977, pages 111-114. Parameters: Input, double P, Q, the parameters of the incomplete Beta function. Input, double BETA, the logarithm of the value of the complete Beta function. Input, double ALPHA, the value of the incomplete Beta function. 0 <= ALPHA <= 1. Output, int &IFAULT, error flag. 0, no error occurred. nonzero, an error occurred. Output, double XINBTA, the argument of the incomplete Beta function which produces the value ALPHA. Local Parameters: Local, double SAE, requests an accuracy of about 10^SAE. */ double xinbta ( double p, double q, double beta, double alpha ) { double a; double acu; double adj; double fpu; double g; double h; int iex; bool indx; double pp; double prev; double qq; double r; double s; double sae = -30.0; double sq; double t; double tx; double value; double w; double xin; double y; double yprev; fpu = pow(10.0, sae); value = alpha; // If the answer is easy to determine, return immediately. if (alpha == 0.0 || alpha == 1) return alpha; // Change tail if necessary. if (0.5 < alpha) { a = 1.0 - alpha; pp = q; qq = p; indx = true; } else { a = alpha; pp = p; qq = q; indx = false; } // Calculate the initial approximation. 
r = sqrt(- log (a * a)); y = r - (2.30753 + 0.27061 * r) / (1.0 + ( 0.99229 + 0.04481 * r) * r); if (1.0 < pp && 1.0 < qq) { r = (y * y - 3.0) / 6.0; s = 1.0 / (pp + pp - 1.0); t = 1.0 / (qq + qq - 1.0); h = 2.0 / (s + t); w = y * sqrt(h + r) / h - (t - s) * (r + 5.0 / 6.0 - 2.0 / (3.0 * h)); value = pp / (pp + qq * exp(w + w)); } else { r = qq + qq; t = 1.0 / (9.0 * qq); t = r * pow(1.0 - t + y * sqrt ( t ), 3); if (t <= 0.0) { value = 1.0 - exp( (log((1.0 - a) * qq) + beta) / qq ); } else { t = (4.0 * pp + r - 2.0) / t; if (t <= 1.0) value = exp( (log(a * pp) + beta) / pp ); else value = 1.0 - 2.0 / (t + 1.0); } } // Solve for X by a modified Newton-Raphson method, // using the function BETAIN. r = 1.0 - pp; t = 1.0 - qq; yprev = 0.0; sq = 1.0; prev = 1.0; if (value < 0.0001) value = 0.0001; if (0.9999 < value) value = 0.9999; iex = max(- 5.0 / pp / pp - 1.0 / pow (a, 0.2) - 13.0, sae); acu = pow(10.0, iex); // Iteration loop. for ( ; ; ) { y = betain(value, pp, qq, beta); xin = value; y = (y - a) * exp(beta + r * log (xin) + t * log (1.0 - xin)); if (y * yprev <= 0.0) prev = max(sq, fpu); g = 1.0; for ( ; ; ) { for ( ; ; ) { adj = g * y; sq = adj * adj; if (sq < prev) { tx = value - adj; if (0.0 <= tx && tx <= 1.0) break; } g = g / 3.0; } // Check whether the current estimate is acceptable. // The change "VALUE = TX" was suggested by Ivan Ukhov. 
if (prev <= acu || y * y <= acu) { value = tx; if (indx) value = 1.0 - value; return value; } if (tx != 0.0 && tx != 1.0) break; g = g / 3.0; } if (tx == value) break; value = tx; yprev = y; } if(indx) value = 1.0 - value; return value; } #endif markovchain/src/RcppExports.cpp0000644000176200001440000010676315137710161016357 0ustar liggesusers// Generated by using Rcpp::compileAttributes() -> do not edit by hand // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 #include #include using namespace Rcpp; #ifdef RCPP_USE_GLOBAL_ROSTREAM Rcpp::Rostream& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); Rcpp::Rostream& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); #endif // isGen bool isGen(NumericMatrix gen); RcppExport SEXP _markovchain_isGen(SEXP genSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type gen(genSEXP); rcpp_result_gen = Rcpp::wrap(isGen(gen)); return rcpp_result_gen; END_RCPP } // generatorToTransitionMatrix NumericMatrix generatorToTransitionMatrix(NumericMatrix gen, bool byrow); RcppExport SEXP _markovchain_generatorToTransitionMatrix(SEXP genSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type gen(genSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(generatorToTransitionMatrix(gen, byrow)); return rcpp_result_gen; END_RCPP } // ctmcFit List ctmcFit(List data, bool byrow, String name, double confidencelevel); RcppExport SEXP _markovchain_ctmcFit(SEXP dataSEXP, SEXP byrowSEXP, SEXP nameSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); Rcpp::traits::input_parameter< String >::type name(nameSEXP); Rcpp::traits::input_parameter< double >::type 
confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(ctmcFit(data, byrow, name, confidencelevel)); return rcpp_result_gen; END_RCPP } // ExpectedTimeRcpp NumericVector ExpectedTimeRcpp(NumericMatrix x, NumericVector y); RcppExport SEXP _markovchain_ExpectedTimeRcpp(SEXP xSEXP, SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type x(xSEXP); Rcpp::traits::input_parameter< NumericVector >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(ExpectedTimeRcpp(x, y)); return rcpp_result_gen; END_RCPP } // probabilityatTRCpp NumericMatrix probabilityatTRCpp(NumericMatrix y); RcppExport SEXP _markovchain_probabilityatTRCpp(SEXP ySEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type y(ySEXP); rcpp_result_gen = Rcpp::wrap(probabilityatTRCpp(y)); return rcpp_result_gen; END_RCPP } // impreciseProbabilityatTRCpp NumericVector impreciseProbabilityatTRCpp(S4 C, int i, int t, int s, double error); RcppExport SEXP _markovchain_impreciseProbabilityatTRCpp(SEXP CSEXP, SEXP iSEXP, SEXP tSEXP, SEXP sSEXP, SEXP errorSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type C(CSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type t(tSEXP); Rcpp::traits::input_parameter< int >::type s(sSEXP); Rcpp::traits::input_parameter< double >::type error(errorSEXP); rcpp_result_gen = Rcpp::wrap(impreciseProbabilityatTRCpp(C, i, t, s, error)); return rcpp_result_gen; END_RCPP } // seq2freqProb NumericVector seq2freqProb(CharacterVector sequence); RcppExport SEXP _markovchain_seq2freqProb(SEXP sequenceSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type sequence(sequenceSEXP); rcpp_result_gen = 
Rcpp::wrap(seq2freqProb(sequence)); return rcpp_result_gen; END_RCPP } // seq2matHigh NumericMatrix seq2matHigh(CharacterVector sequence, int order); RcppExport SEXP _markovchain_seq2matHigh(SEXP sequenceSEXP, SEXP orderSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type sequence(sequenceSEXP); Rcpp::traits::input_parameter< int >::type order(orderSEXP); rcpp_result_gen = Rcpp::wrap(seq2matHigh(sequence, order)); return rcpp_result_gen; END_RCPP } // markovchainSequenceRcpp CharacterVector markovchainSequenceRcpp(int n, S4 markovchain, CharacterVector t0, bool include_t0); RcppExport SEXP _markovchain_markovchainSequenceRcpp(SEXP nSEXP, SEXP markovchainSEXP, SEXP t0SEXP, SEXP include_t0SEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< S4 >::type markovchain(markovchainSEXP); Rcpp::traits::input_parameter< CharacterVector >::type t0(t0SEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); rcpp_result_gen = Rcpp::wrap(markovchainSequenceRcpp(n, markovchain, t0, include_t0)); return rcpp_result_gen; END_RCPP } // markovchainListRcpp List markovchainListRcpp(int n, List object, bool include_t0, CharacterVector t0); RcppExport SEXP _markovchain_markovchainListRcpp(SEXP nSEXP, SEXP objectSEXP, SEXP include_t0SEXP, SEXP t0SEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< List >::type object(objectSEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); Rcpp::traits::input_parameter< CharacterVector >::type t0(t0SEXP); rcpp_result_gen = Rcpp::wrap(markovchainListRcpp(n, object, include_t0, t0)); return rcpp_result_gen; END_RCPP } // markovchainSequenceParallelRcpp List markovchainSequenceParallelRcpp(S4 
listObject, int n, bool include_t0, CharacterVector init_state); RcppExport SEXP _markovchain_markovchainSequenceParallelRcpp(SEXP listObjectSEXP, SEXP nSEXP, SEXP include_t0SEXP, SEXP init_stateSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type listObject(listObjectSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); Rcpp::traits::input_parameter< bool >::type include_t0(include_t0SEXP); Rcpp::traits::input_parameter< CharacterVector >::type init_state(init_stateSEXP); rcpp_result_gen = Rcpp::wrap(markovchainSequenceParallelRcpp(listObject, n, include_t0, init_state)); return rcpp_result_gen; END_RCPP } // createSequenceMatrix NumericMatrix createSequenceMatrix(SEXP stringchar, bool toRowProbs, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain_createSequenceMatrix(SEXP stringcharSEXP, SEXP toRowProbsSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type stringchar(stringcharSEXP); Rcpp::traits::input_parameter< bool >::type toRowProbs(toRowProbsSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(createSequenceMatrix(stringchar, toRowProbs, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // mcListFitForList List mcListFitForList(List data); RcppExport SEXP _markovchain_mcListFitForList(SEXP dataSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); rcpp_result_gen = Rcpp::wrap(mcListFitForList(data)); return rcpp_result_gen; END_RCPP } // _matr2Mc S4 _matr2Mc(CharacterMatrix matrData, double laplacian, bool sanitize, CharacterVector possibleStates); RcppExport SEXP _markovchain__matr2Mc(SEXP 
matrDataSEXP, SEXP laplacianSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterMatrix >::type matrData(matrDataSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(_matr2Mc(matrData, laplacian, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // _list2Mc S4 _list2Mc(List data, double laplacian, bool sanitize); RcppExport SEXP _markovchain__list2Mc(SEXP dataSEXP, SEXP laplacianSEXP, SEXP sanitizeSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type data(dataSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); rcpp_result_gen = Rcpp::wrap(_list2Mc(data, laplacian, sanitize)); return rcpp_result_gen; END_RCPP } // inferHyperparam List inferHyperparam(NumericMatrix transMatr, NumericVector scale, CharacterVector data); RcppExport SEXP _markovchain_inferHyperparam(SEXP transMatrSEXP, SEXP scaleSEXP, SEXP dataSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMatr(transMatrSEXP); Rcpp::traits::input_parameter< NumericVector >::type scale(scaleSEXP); Rcpp::traits::input_parameter< CharacterVector >::type data(dataSEXP); rcpp_result_gen = Rcpp::wrap(inferHyperparam(transMatr, scale, data)); return rcpp_result_gen; END_RCPP } // markovchainFit List markovchainFit(SEXP data, String method, bool byrow, int nboot, double laplacian, String name, bool parallel, double confidencelevel, bool confint, NumericMatrix hyperparam, bool sanitize, CharacterVector possibleStates); RcppExport SEXP 
_markovchain_markovchainFit(SEXP dataSEXP, SEXP methodSEXP, SEXP byrowSEXP, SEXP nbootSEXP, SEXP laplacianSEXP, SEXP nameSEXP, SEXP parallelSEXP, SEXP confidencelevelSEXP, SEXP confintSEXP, SEXP hyperparamSEXP, SEXP sanitizeSEXP, SEXP possibleStatesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< SEXP >::type data(dataSEXP); Rcpp::traits::input_parameter< String >::type method(methodSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); Rcpp::traits::input_parameter< int >::type nboot(nbootSEXP); Rcpp::traits::input_parameter< double >::type laplacian(laplacianSEXP); Rcpp::traits::input_parameter< String >::type name(nameSEXP); Rcpp::traits::input_parameter< bool >::type parallel(parallelSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); Rcpp::traits::input_parameter< bool >::type confint(confintSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); Rcpp::traits::input_parameter< bool >::type sanitize(sanitizeSEXP); Rcpp::traits::input_parameter< CharacterVector >::type possibleStates(possibleStatesSEXP); rcpp_result_gen = Rcpp::wrap(markovchainFit(data, method, byrow, nboot, laplacian, name, parallel, confidencelevel, confint, hyperparam, sanitize, possibleStates)); return rcpp_result_gen; END_RCPP } // noofVisitsDistRCpp NumericVector noofVisitsDistRCpp(NumericMatrix matrix, int i, int N); RcppExport SEXP _markovchain_noofVisitsDistRCpp(SEXP matrixSEXP, SEXP iSEXP, SEXP NSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type N(NSEXP); rcpp_result_gen = Rcpp::wrap(noofVisitsDistRCpp(matrix, i, N)); return rcpp_result_gen; END_RCPP } // multinomialCIForRow NumericMatrix multinomialCIForRow(NumericVector x, double 
confidencelevel); RcppExport SEXP _markovchain_multinomialCIForRow(SEXP xSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type x(xSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(multinomialCIForRow(x, confidencelevel)); return rcpp_result_gen; END_RCPP } // multinomCI List multinomCI(NumericMatrix transMat, NumericMatrix seqMat, double confidencelevel); RcppExport SEXP _markovchain_multinomCI(SEXP transMatSEXP, SEXP seqMatSEXP, SEXP confidencelevelSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMat(transMatSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type seqMat(seqMatSEXP); Rcpp::traits::input_parameter< double >::type confidencelevel(confidencelevelSEXP); rcpp_result_gen = Rcpp::wrap(multinomCI(transMat, seqMat, confidencelevel)); return rcpp_result_gen; END_RCPP } // commClassesKernel List commClassesKernel(NumericMatrix P); RcppExport SEXP _markovchain_commClassesKernel(SEXP PSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); rcpp_result_gen = Rcpp::wrap(commClassesKernel(P)); return rcpp_result_gen; END_RCPP } // communicatingClasses List communicatingClasses(S4 object); RcppExport SEXP _markovchain_communicatingClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(communicatingClasses(object)); return rcpp_result_gen; END_RCPP } // transientStates CharacterVector transientStates(S4 object); RcppExport SEXP _markovchain_transientStates(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; 
Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(transientStates(object)); return rcpp_result_gen; END_RCPP } // recurrentStates CharacterVector recurrentStates(S4 object); RcppExport SEXP _markovchain_recurrentStates(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(recurrentStates(object)); return rcpp_result_gen; END_RCPP } // recurrentClasses List recurrentClasses(S4 object); RcppExport SEXP _markovchain_recurrentClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(recurrentClasses(object)); return rcpp_result_gen; END_RCPP } // transientClasses List transientClasses(S4 object); RcppExport SEXP _markovchain_transientClasses(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(transientClasses(object)); return rcpp_result_gen; END_RCPP } // reachabilityMatrix LogicalMatrix reachabilityMatrix(S4 obj); RcppExport SEXP _markovchain_reachabilityMatrix(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(reachabilityMatrix(obj)); return rcpp_result_gen; END_RCPP } // isAccessible bool isAccessible(S4 obj, String from, String to); RcppExport SEXP _markovchain_isAccessible(SEXP objSEXP, SEXP fromSEXP, SEXP toSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); Rcpp::traits::input_parameter< String >::type from(fromSEXP); Rcpp::traits::input_parameter< String >::type to(toSEXP); rcpp_result_gen = 
Rcpp::wrap(isAccessible(obj, from, to)); return rcpp_result_gen; END_RCPP } // summaryKernel List summaryKernel(S4 object); RcppExport SEXP _markovchain_summaryKernel(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(summaryKernel(object)); return rcpp_result_gen; END_RCPP } // firstpassageKernel NumericMatrix firstpassageKernel(NumericMatrix P, int i, int n); RcppExport SEXP _markovchain_firstpassageKernel(SEXP PSEXP, SEXP iSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(firstpassageKernel(P, i, n)); return rcpp_result_gen; END_RCPP } // firstPassageMultipleRCpp NumericVector firstPassageMultipleRCpp(NumericMatrix P, int i, NumericVector setno, int n); RcppExport SEXP _markovchain_firstPassageMultipleRCpp(SEXP PSEXP, SEXP iSEXP, SEXP setnoSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type P(PSEXP); Rcpp::traits::input_parameter< int >::type i(iSEXP); Rcpp::traits::input_parameter< NumericVector >::type setno(setnoSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(firstPassageMultipleRCpp(P, i, setno, n)); return rcpp_result_gen; END_RCPP } // expectedRewardsRCpp NumericVector expectedRewardsRCpp(NumericMatrix matrix, int n, NumericVector rewards); RcppExport SEXP _markovchain_expectedRewardsRCpp(SEXP matrixSEXP, SEXP nSEXP, SEXP rewardsSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); 
Rcpp::traits::input_parameter< NumericVector >::type rewards(rewardsSEXP); rcpp_result_gen = Rcpp::wrap(expectedRewardsRCpp(matrix, n, rewards)); return rcpp_result_gen; END_RCPP } // expectedRewardsBeforeHittingARCpp double expectedRewardsBeforeHittingARCpp(NumericMatrix matrix, int s0, NumericVector rewards, int n); RcppExport SEXP _markovchain_expectedRewardsBeforeHittingARCpp(SEXP matrixSEXP, SEXP s0SEXP, SEXP rewardsSEXP, SEXP nSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); Rcpp::traits::input_parameter< int >::type s0(s0SEXP); Rcpp::traits::input_parameter< NumericVector >::type rewards(rewardsSEXP); Rcpp::traits::input_parameter< int >::type n(nSEXP); rcpp_result_gen = Rcpp::wrap(expectedRewardsBeforeHittingARCpp(matrix, s0, rewards, n)); return rcpp_result_gen; END_RCPP } // gcd int gcd(int a, int b); RcppExport SEXP _markovchain_gcd(SEXP aSEXP, SEXP bSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< int >::type a(aSEXP); Rcpp::traits::input_parameter< int >::type b(bSEXP); rcpp_result_gen = Rcpp::wrap(gcd(a, b)); return rcpp_result_gen; END_RCPP } // period int period(S4 object); RcppExport SEXP _markovchain_period(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(period(object)); return rcpp_result_gen; END_RCPP } // predictiveDistribution double predictiveDistribution(CharacterVector stringchar, CharacterVector newData, NumericMatrix hyperparam); RcppExport SEXP _markovchain_predictiveDistribution(SEXP stringcharSEXP, SEXP newDataSEXP, SEXP hyperparamSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type stringchar(stringcharSEXP); Rcpp::traits::input_parameter< 
CharacterVector >::type newData(newDataSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); rcpp_result_gen = Rcpp::wrap(predictiveDistribution(stringchar, newData, hyperparam)); return rcpp_result_gen; END_RCPP } // priorDistribution NumericVector priorDistribution(NumericMatrix transMatr, NumericMatrix hyperparam); RcppExport SEXP _markovchain_priorDistribution(SEXP transMatrSEXP, SEXP hyperparamSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type transMatr(transMatrSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hyperparam(hyperparamSEXP); rcpp_result_gen = Rcpp::wrap(priorDistribution(transMatr, hyperparam)); return rcpp_result_gen; END_RCPP } // hittingProbabilities NumericMatrix hittingProbabilities(S4 object); RcppExport SEXP _markovchain_hittingProbabilities(SEXP objectSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type object(objectSEXP); rcpp_result_gen = Rcpp::wrap(hittingProbabilities(object)); return rcpp_result_gen; END_RCPP } // canonicForm S4 canonicForm(S4 obj); RcppExport SEXP _markovchain_canonicForm(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(canonicForm(obj)); return rcpp_result_gen; END_RCPP } // steadyStates NumericMatrix steadyStates(S4 obj); RcppExport SEXP _markovchain_steadyStates(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(steadyStates(obj)); return rcpp_result_gen; END_RCPP } // absorbingStates CharacterVector absorbingStates(S4 obj); RcppExport SEXP _markovchain_absorbingStates(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; 
Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(absorbingStates(obj)); return rcpp_result_gen; END_RCPP } // isIrreducible bool isIrreducible(S4 obj); RcppExport SEXP _markovchain_isIrreducible(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(isIrreducible(obj)); return rcpp_result_gen; END_RCPP } // isRegular bool isRegular(S4 obj); RcppExport SEXP _markovchain_isRegular(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(isRegular(obj)); return rcpp_result_gen; END_RCPP } // meanAbsorptionTime NumericVector meanAbsorptionTime(S4 obj); RcppExport SEXP _markovchain_meanAbsorptionTime(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanAbsorptionTime(obj)); return rcpp_result_gen; END_RCPP } // absorptionProbabilities NumericMatrix absorptionProbabilities(S4 obj); RcppExport SEXP _markovchain_absorptionProbabilities(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(absorptionProbabilities(obj)); return rcpp_result_gen; END_RCPP } // meanFirstPassageTime NumericMatrix meanFirstPassageTime(S4 obj, CharacterVector destination); RcppExport SEXP _markovchain_meanFirstPassageTime(SEXP objSEXP, SEXP destinationSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); Rcpp::traits::input_parameter< CharacterVector >::type destination(destinationSEXP); rcpp_result_gen = Rcpp::wrap(meanFirstPassageTime(obj, destination)); return rcpp_result_gen; END_RCPP } // 
meanRecurrenceTime NumericVector meanRecurrenceTime(S4 obj); RcppExport SEXP _markovchain_meanRecurrenceTime(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanRecurrenceTime(obj)); return rcpp_result_gen; END_RCPP } // meanNumVisits NumericMatrix meanNumVisits(S4 obj); RcppExport SEXP _markovchain_meanNumVisits(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(meanNumVisits(obj)); return rcpp_result_gen; END_RCPP } // isProb bool isProb(double prob); RcppExport SEXP _markovchain_isProb(SEXP probSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< double >::type prob(probSEXP); rcpp_result_gen = Rcpp::wrap(isProb(prob)); return rcpp_result_gen; END_RCPP } // isStochasticMatrix bool isStochasticMatrix(NumericMatrix m, bool byrow); RcppExport SEXP _markovchain_isStochasticMatrix(SEXP mSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type m(mSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(isStochasticMatrix(m, byrow)); return rcpp_result_gen; END_RCPP } // isProbVector bool isProbVector(NumericVector prob); RcppExport SEXP _markovchain_isProbVector(SEXP probSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericVector >::type prob(probSEXP); rcpp_result_gen = Rcpp::wrap(isProbVector(prob)); return rcpp_result_gen; END_RCPP } // checkIsAccesibleMethod bool checkIsAccesibleMethod(S4 obj); RcppExport SEXP _markovchain_checkIsAccesibleMethod(SEXP objSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; 
Rcpp::traits::input_parameter< S4 >::type obj(objSEXP); rcpp_result_gen = Rcpp::wrap(checkIsAccesibleMethod(obj)); return rcpp_result_gen; END_RCPP } // approxEqual bool approxEqual(NumericMatrix a, NumericMatrix b); RcppExport SEXP _markovchain_approxEqual(SEXP aSEXP, SEXP bSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type a(aSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type b(bSEXP); rcpp_result_gen = Rcpp::wrap(approxEqual(a, b)); return rcpp_result_gen; END_RCPP } // isPartition bool isPartition(List commClasses, CharacterVector states); RcppExport SEXP _markovchain_isPartition(SEXP commClassesSEXP, SEXP statesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type commClasses(commClassesSEXP); Rcpp::traits::input_parameter< CharacterVector >::type states(statesSEXP); rcpp_result_gen = Rcpp::wrap(isPartition(commClasses, states)); return rcpp_result_gen; END_RCPP } // areHittingProbabilities bool areHittingProbabilities(NumericMatrix probs, NumericMatrix hitting, bool byrow); RcppExport SEXP _markovchain_areHittingProbabilities(SEXP probsSEXP, SEXP hittingSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type probs(probsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(areHittingProbabilities(probs, hitting, byrow)); return rcpp_result_gen; END_RCPP } // areMeanNumVisits bool areMeanNumVisits(NumericMatrix probs, NumericMatrix numVisits, NumericMatrix hitting, bool byrow); RcppExport SEXP _markovchain_areMeanNumVisits(SEXP probsSEXP, SEXP numVisitsSEXP, SEXP hittingSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; 
Rcpp::traits::input_parameter< NumericMatrix >::type probs(probsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type numVisits(numVisitsSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(areMeanNumVisits(probs, numVisits, hitting, byrow)); return rcpp_result_gen; END_RCPP } // recurrentHitting bool recurrentHitting(List recurrentClasses, NumericMatrix hitting, CharacterVector states, bool byrow); RcppExport SEXP _markovchain_recurrentHitting(SEXP recurrentClassesSEXP, SEXP hittingSEXP, SEXP statesSEXP, SEXP byrowSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< List >::type recurrentClasses(recurrentClassesSEXP); Rcpp::traits::input_parameter< NumericMatrix >::type hitting(hittingSEXP); Rcpp::traits::input_parameter< CharacterVector >::type states(statesSEXP); Rcpp::traits::input_parameter< bool >::type byrow(byrowSEXP); rcpp_result_gen = Rcpp::wrap(recurrentHitting(recurrentClasses, hitting, states, byrow)); return rcpp_result_gen; END_RCPP } // hittingProbsAreOne bool hittingProbsAreOne(NumericMatrix matrix); RcppExport SEXP _markovchain_hittingProbsAreOne(SEXP matrixSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< NumericMatrix >::type matrix(matrixSEXP); rcpp_result_gen = Rcpp::wrap(hittingProbsAreOne(matrix)); return rcpp_result_gen; END_RCPP } // absorbingAreRecurrentClass bool absorbingAreRecurrentClass(CharacterVector absorbingStates, List recurrentClasses); RcppExport SEXP _markovchain_absorbingAreRecurrentClass(SEXP absorbingStatesSEXP, SEXP recurrentClassesSEXP) { BEGIN_RCPP Rcpp::RObject rcpp_result_gen; Rcpp::RNGScope rcpp_rngScope_gen; Rcpp::traits::input_parameter< CharacterVector >::type absorbingStates(absorbingStatesSEXP); Rcpp::traits::input_parameter< List >::type 
recurrentClasses(recurrentClassesSEXP); rcpp_result_gen = Rcpp::wrap(absorbingAreRecurrentClass(absorbingStates, recurrentClasses)); return rcpp_result_gen; END_RCPP } static const R_CallMethodDef CallEntries[] = { {"_markovchain_isGen", (DL_FUNC) &_markovchain_isGen, 1}, {"_markovchain_generatorToTransitionMatrix", (DL_FUNC) &_markovchain_generatorToTransitionMatrix, 2}, {"_markovchain_ctmcFit", (DL_FUNC) &_markovchain_ctmcFit, 4}, {"_markovchain_ExpectedTimeRcpp", (DL_FUNC) &_markovchain_ExpectedTimeRcpp, 2}, {"_markovchain_probabilityatTRCpp", (DL_FUNC) &_markovchain_probabilityatTRCpp, 1}, {"_markovchain_impreciseProbabilityatTRCpp", (DL_FUNC) &_markovchain_impreciseProbabilityatTRCpp, 5}, {"_markovchain_seq2freqProb", (DL_FUNC) &_markovchain_seq2freqProb, 1}, {"_markovchain_seq2matHigh", (DL_FUNC) &_markovchain_seq2matHigh, 2}, {"_markovchain_markovchainSequenceRcpp", (DL_FUNC) &_markovchain_markovchainSequenceRcpp, 4}, {"_markovchain_markovchainListRcpp", (DL_FUNC) &_markovchain_markovchainListRcpp, 4}, {"_markovchain_markovchainSequenceParallelRcpp", (DL_FUNC) &_markovchain_markovchainSequenceParallelRcpp, 4}, {"_markovchain_createSequenceMatrix", (DL_FUNC) &_markovchain_createSequenceMatrix, 4}, {"_markovchain_mcListFitForList", (DL_FUNC) &_markovchain_mcListFitForList, 1}, {"_markovchain__matr2Mc", (DL_FUNC) &_markovchain__matr2Mc, 4}, {"_markovchain__list2Mc", (DL_FUNC) &_markovchain__list2Mc, 3}, {"_markovchain_inferHyperparam", (DL_FUNC) &_markovchain_inferHyperparam, 3}, {"_markovchain_markovchainFit", (DL_FUNC) &_markovchain_markovchainFit, 12}, {"_markovchain_noofVisitsDistRCpp", (DL_FUNC) &_markovchain_noofVisitsDistRCpp, 3}, {"_markovchain_multinomialCIForRow", (DL_FUNC) &_markovchain_multinomialCIForRow, 2}, {"_markovchain_multinomCI", (DL_FUNC) &_markovchain_multinomCI, 3}, {"_markovchain_commClassesKernel", (DL_FUNC) &_markovchain_commClassesKernel, 1}, {"_markovchain_communicatingClasses", (DL_FUNC) &_markovchain_communicatingClasses, 1}, 
{"_markovchain_transientStates", (DL_FUNC) &_markovchain_transientStates, 1}, {"_markovchain_recurrentStates", (DL_FUNC) &_markovchain_recurrentStates, 1}, {"_markovchain_recurrentClasses", (DL_FUNC) &_markovchain_recurrentClasses, 1}, {"_markovchain_transientClasses", (DL_FUNC) &_markovchain_transientClasses, 1}, {"_markovchain_reachabilityMatrix", (DL_FUNC) &_markovchain_reachabilityMatrix, 1}, {"_markovchain_isAccessible", (DL_FUNC) &_markovchain_isAccessible, 3}, {"_markovchain_summaryKernel", (DL_FUNC) &_markovchain_summaryKernel, 1}, {"_markovchain_firstpassageKernel", (DL_FUNC) &_markovchain_firstpassageKernel, 3}, {"_markovchain_firstPassageMultipleRCpp", (DL_FUNC) &_markovchain_firstPassageMultipleRCpp, 4}, {"_markovchain_expectedRewardsRCpp", (DL_FUNC) &_markovchain_expectedRewardsRCpp, 3}, {"_markovchain_expectedRewardsBeforeHittingARCpp", (DL_FUNC) &_markovchain_expectedRewardsBeforeHittingARCpp, 4}, {"_markovchain_gcd", (DL_FUNC) &_markovchain_gcd, 2}, {"_markovchain_period", (DL_FUNC) &_markovchain_period, 1}, {"_markovchain_predictiveDistribution", (DL_FUNC) &_markovchain_predictiveDistribution, 3}, {"_markovchain_priorDistribution", (DL_FUNC) &_markovchain_priorDistribution, 2}, {"_markovchain_hittingProbabilities", (DL_FUNC) &_markovchain_hittingProbabilities, 1}, {"_markovchain_canonicForm", (DL_FUNC) &_markovchain_canonicForm, 1}, {"_markovchain_steadyStates", (DL_FUNC) &_markovchain_steadyStates, 1}, {"_markovchain_absorbingStates", (DL_FUNC) &_markovchain_absorbingStates, 1}, {"_markovchain_isIrreducible", (DL_FUNC) &_markovchain_isIrreducible, 1}, {"_markovchain_isRegular", (DL_FUNC) &_markovchain_isRegular, 1}, {"_markovchain_meanAbsorptionTime", (DL_FUNC) &_markovchain_meanAbsorptionTime, 1}, {"_markovchain_absorptionProbabilities", (DL_FUNC) &_markovchain_absorptionProbabilities, 1}, {"_markovchain_meanFirstPassageTime", (DL_FUNC) &_markovchain_meanFirstPassageTime, 2}, {"_markovchain_meanRecurrenceTime", (DL_FUNC) 
&_markovchain_meanRecurrenceTime, 1}, {"_markovchain_meanNumVisits", (DL_FUNC) &_markovchain_meanNumVisits, 1}, {"_markovchain_isProb", (DL_FUNC) &_markovchain_isProb, 1}, {"_markovchain_isStochasticMatrix", (DL_FUNC) &_markovchain_isStochasticMatrix, 2}, {"_markovchain_isProbVector", (DL_FUNC) &_markovchain_isProbVector, 1}, {"_markovchain_checkIsAccesibleMethod", (DL_FUNC) &_markovchain_checkIsAccesibleMethod, 1}, {"_markovchain_approxEqual", (DL_FUNC) &_markovchain_approxEqual, 2}, {"_markovchain_isPartition", (DL_FUNC) &_markovchain_isPartition, 2}, {"_markovchain_areHittingProbabilities", (DL_FUNC) &_markovchain_areHittingProbabilities, 3}, {"_markovchain_areMeanNumVisits", (DL_FUNC) &_markovchain_areMeanNumVisits, 4}, {"_markovchain_recurrentHitting", (DL_FUNC) &_markovchain_recurrentHitting, 4}, {"_markovchain_hittingProbsAreOne", (DL_FUNC) &_markovchain_hittingProbsAreOne, 1}, {"_markovchain_absorbingAreRecurrentClass", (DL_FUNC) &_markovchain_absorbingAreRecurrentClass, 2}, {NULL, NULL, 0} }; RcppExport void R_init_markovchain(DllInfo *dll) { R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } markovchain/src/ctmcProbabilistic.cpp0000644000176200001440000000704115137702633017515 0ustar liggesusers#include // [[Rcpp::depends(RcppArmadillo)]] #include #include using namespace Rcpp; using namespace RcppArmadillo; using namespace arma; using namespace std; // [[Rcpp::export(.ExpectedTimeRCpp)]] NumericVector ExpectedTimeRcpp(NumericMatrix x, NumericVector y) { NumericVector out; int size = x.nrow(); arma::mat T = arma::zeros(size, size); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) T(i, j) = x(i, j); arma::vec c = arma::zeros(size); for (int i = 0; i < size; i++) c[i] = y[i]; out = wrap(solve(T,c)); return out; } // [[Rcpp::export(.probabilityatTRCpp)]] NumericMatrix probabilityatTRCpp(NumericMatrix y) { int size = y.nrow(); NumericMatrix out(size,size); arma::mat T = arma::zeros(size, size); for (int i = 
0; i < size; i++) for (int j = 0; j < size; j++) T(i, j) = y(i, j); T = expmat(T); for (int i = 0; i < size; i++) for (int j = 0; j < size; j++) out(i, j) = T(i, j); return out; } // [[Rcpp::export(.impreciseProbabilityatTRCpp)]] NumericVector impreciseProbabilityatTRCpp(S4 C, int i,int t, int s, double error) { CharacterVector states = C.slot("states"); int noOfstates = states.size(); NumericMatrix non_Q = C.slot("Q"); NumericMatrix non_range = C.slot("range"); arma::mat Q = arma::zeros(noOfstates, noOfstates); arma::mat range = arma::zeros(noOfstates, 2); // initialises armadillo matrices for (int p = 0; p < noOfstates; p++) for (int q = 0; q < noOfstates; q++) Q(p, q) = non_Q(p, q); // initialises armadillo matrices for (int p = 0; p < noOfstates; p++) for (int q = 0; q < 2; q++) range(p, q) = non_range(p, q); // initialses value of norm of Q double QNorm = -1.0; // calculates norm of Q for (int p =0; p < noOfstates; p++) { float sum = 0.0; for (int q = 0; q < noOfstates; q++) { if (Q(p, q) >= 0) sum = sum + Q(p, q); else sum = sum + -Q(p, q); } if (sum * range(p, 1) > QNorm) QNorm = sum * range(p, 1); } // calculates no. 
of iterations according to error rate, QNorm and other parameters int n; if ((s - t) * QNorm > (s - t) * (s - t) * QNorm * QNorm * 1/ (2 * error)) n = (int)(s - t) * QNorm; else n = (int)(s - t) * (s-t) * QNorm * QNorm * 1/(2 * error); // sets delta value float delta = (s - t) * 1.0/n; // declares and initialises initial f arma::vec Ii(noOfstates); for (int p = 0; p < noOfstates; p++) Ii[p] = 0; Ii[i-1] = 1; // calculation of Qgx vector arma::vec values = Q * Ii; arma::vec Qgx(noOfstates); for (int p = 0; p < noOfstates; p++) Qgx[p] = 0; for (int p = 0; p < noOfstates; p++) { if (values[p] * range(p, 0) < values[p] * range(p, 1)) Qgx[p] = values[p] * range(p,0); else Qgx[p] = values[p] * range(p,1); } Qgx = delta * Qgx; // runs n-1 iterations according to the algorithm // Qgx_i = Qgx_{i-1} + delta*Q*Qgx_{i-1} Qgx = Qgx + Ii; for (int iter = 0; iter < n - 1; iter++) { arma::vec temp = Qgx; values = Q * Qgx; for (int p = 0; p < noOfstates; p++) { // calculating keeping in mind the lower opertaotr values if (values[p] * range(p,0) < values[p] * range(p,1)) Qgx[p] = values[p] * range(p,0); else Qgx[p] = values[p] * range(p,1); } Qgx = delta * Qgx; Qgx = temp + Qgx; } NumericVector out; for (int p = 0; p < noOfstates; p++) out.push_back(Qgx[p]); return out; } markovchain/src/utils.cpp0000644000176200001440000002405715137702633015226 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include #include #include using namespace Rcpp; using namespace arma; using namespace std; // Defined in probabilistic.cpp bool isAccessible(S4 obj, String from, String to); // Defined in probabilistic.cpp LogicalMatrix reachabilityMatrix(S4 obj); // matrix power function // O(log n * m³) where m is the number of rows / cols of A mat matrixPow(const mat& A, int n) { int m = A.n_rows; mat result = eye(m, m); mat partial = A; // We can decompose n = 2^a + 2^b + 2^c ... 
with a > b > c >= 0 // Compute last = a + 1 while (n > 0) { if ((n & 1) > 0) result = result * partial; partial = partial * partial; n >>= 1; } return result; } // check if two vectors are intersected bool intersects(CharacterVector x, CharacterVector y) { if (x.size() < y.size()) return intersects(y, x); else { unordered_set values; bool intersect = false; for (auto value : x) values.insert(as(value)); for (auto it = y.begin(); it != y.end() && !intersect; ++it) intersect = values.count(as(*it)) > 0; return intersect; } } bool anyElement(const mat& matrix, bool (*condition)(const double&)) { int numRows = matrix.n_rows; int numCols = matrix.n_cols; bool found = false; for (int i = 0; i < numRows && !found; ++i) for (int j = 0; j < numCols && !found; ++j) found = condition(matrix(i, j)); return found; } bool allElements(const mat& matrix, bool (*condition)(const double&)) { int numRows = matrix.n_rows; int numCols = matrix.n_cols; bool all = true; for (int i = 0; i < numRows && all; ++i) for (int j = 0; j < numCols && all; ++j) all = condition(matrix(i, j)); return all; } bool approxEqual(const double& a, const double& b) { if (a >= b) return (a - b) <= 1E-7; else return approxEqual(b, a); } bool approxEqual(const cx_double& a, const cx_double& b){ double x = a.real() - b.real(); double y = a.imag() - b.imag(); return (x*x - y*y) <= 1E-14; } // check if prob is probability or not // [[Rcpp::export(.isProbability)]] bool isProb(double prob) { return (prob >= 0 && prob <= 1); } // checks if a matrix is stochastic (by rows or by columns), i.e. all // elements are probabilities and the rows (cols, resp.) 
sum 1 // [[Rcpp::export(.isStochasticMatrix)]] bool isStochasticMatrix(NumericMatrix m, bool byrow) { if (!byrow) m = transpose(m); int nrow = m.nrow(); int ncol = m.ncol(); bool isStochastic = true; double rowSum; for (int i = 0; i < nrow && isStochastic; ++i) { rowSum = 0; for (int j = 0; j < ncol && isStochastic; ++j) { isStochastic = m(i, j) >= 0; rowSum += m(i, j); } isStochastic = approxEqual(rowSum, 1); } return isStochastic; } // [[Rcpp::export(.isProbabilityVector)]] bool isProbVector(NumericVector prob) { bool result = true; double sumProbs = 0; for (int i = 0; i < prob.size() && result; ++i) { result = prob[i] >= 0; sumProbs += prob[i]; } return result && approxEqual(sumProbs, 1); } // [[Rcpp::export(.testthatIsAccesibleRcpp)]] bool checkIsAccesibleMethod(S4 obj) { CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); LogicalMatrix reachability = reachabilityMatrix(obj); int m = states.size(); bool correct = true; bool reachable; for (int i = 0; i < m && correct; ++i) { for (int j = 0; j < m && correct; ++j) { reachable = (byrow ? 
reachability(i, j) : reachability(j, i)); correct = isAccessible(obj, states(i), states(j)) == reachable; } } return correct; } // [[Rcpp::export(.approxEqualMatricesRcpp)]] bool approxEqual(NumericMatrix a, NumericMatrix b) { int a_ncol = a.ncol(); int b_ncol = b.ncol(); int a_nrow = a.nrow(); int b_nrow = b.nrow(); if (a_ncol != b_ncol || a_nrow != b_nrow) return false; else { bool equal = true; for (int i = 0; i < a_nrow && equal; ++i) for (int j = 0; j < a_ncol && equal; ++j) equal = approxEqual(a(i, j), b(i, j)); return equal; } } // This method receives the output of communicatingClasses(object) and object@states // and checks that in fact the communicating classes are a partition of states // Is a method agnostic on whether the Markov Chain was given by rows or columns // [[Rcpp::export(.testthatIsPartitionRcpp)]] bool isPartition(List commClasses, CharacterVector states) { int n = states.size(); unordered_set used; unordered_set originalStates; int numClassStates = 0; bool partition = true; for (auto state : states) originalStates.insert((string) state); // Check that the union of the classes is // states and they do not overlap for (int i = 0; i < commClasses.size() && partition; ++i) { CharacterVector currentClass = commClasses(i); numClassStates += currentClass.size(); for (int j = 0; j < currentClass.size() && partition; ++j) { string state = (string) currentClass(j); partition = used.count(state) == 0 && originalStates.count(state) > 0; used.insert(state); } } return partition && numClassStates == n; } // This is simply a method that checks the following recurrence, // naming p = probs, f = hitting, it checks: // // f(i, j) = p(i, j) + ∑_{k ≠ j} p(i, k) f(k, j) // // where p are the transitionMatrix probs and hitting are the // hitting probabilities for the Markov Chain associated to // probs. byrow indicates whether probs is an stochastic matrix // by rows or by columns. 
// [[Rcpp::export(.testthatAreHittingRcpp)]] bool areHittingProbabilities(NumericMatrix probs, NumericMatrix hitting, bool byrow) { if (!byrow) { probs = transpose(probs); hitting = transpose(hitting); } int numStates = probs.nrow(); bool holds = true; double result; for (int i = 0; i < numStates && holds; ++i) { for (int j = 0; j < numStates && holds; ++j) { result = 0; for (int k = 0; k < numStates; ++k) if (k != j) result -= probs(i, k) * hitting(k, j); result += hitting(i, j) - probs(i, j); holds = approxEqual(result, 0); } } return holds; } // This is simply a method that checks the following recurrence, // naming p = probs, E = mean number of visits, f = hitting // probabilities, it checks: // // E(i, j) = p(i, j) / (1 - f(j, j)) + ∑_{k ≠ j} p(i, k) E(k, j) // // Note this recurrence is similar to the one for hitting probabilities // We have to take care when E(i, j) = 0, because we would have to check // that in either p(i, k) = 0 or E(k, j) = 0. If E(i, j) = ∞ we would have // to check that either p(i, j) > 0 or (p(i, k) != 0 and E(k, j) = ∞) // // where p are the transitionMatrix probs, numVisits are the mean // number of visits for each state, and hitting are the hitting // probabilities for the Markov Chain associated to probs. // byrow indicates whether probs is an stochastic matrix // by rows or by columns. 
// [[Rcpp::export(.testthatAreMeanNumVisitsRcpp)]] bool areMeanNumVisits(NumericMatrix probs, NumericMatrix numVisits, NumericMatrix hitting, bool byrow) { if (!byrow) { probs = transpose(probs); numVisits = transpose(numVisits); hitting = transpose(hitting); } int numStates = probs.ncol(); bool holds = true; double result; double inverse; for (int j = 0; j < numStates && holds; ++j) { if (!approxEqual(hitting(j, j), 1)) { inverse = 1 / (1 - hitting(j, j)); for (int i = 0; i < numStates && holds; ++i) { result = 0; for (int k = 0; k < numStates; ++k) if (k != j) result -= probs(i, k) * numVisits(k, j); result += numVisits(i, j) - probs(i, j) * inverse; holds = approxEqual(result, 0); } } } return holds; } // [[Rcpp::export(.testthatRecurrentHittingRcpp)]] bool recurrentHitting(List recurrentClasses, NumericMatrix hitting, CharacterVector states, bool byrow) { if (!byrow) hitting = transpose(hitting); unordered_map stateToIndex; bool correct = true; int n = states.size(); for (int i = 0; i < n; ++i) stateToIndex[(string) states(i)] = i; for (CharacterVector recClass : recurrentClasses) { unordered_set classIndexes; for (auto state : recClass) classIndexes.insert(stateToIndex[(string) state]); for (int i : classIndexes) { for (int j = 0; j < n; ++j) { if (classIndexes.count(j) > 0) correct = correct && approxEqual(hitting(i, j), 1); else correct = correct && approxEqual(hitting(i, j), 0); } } } return correct; } // [[Rcpp::export(.testthatHittingAreOneRcpp)]] bool hittingProbsAreOne(NumericMatrix matrix) { bool allOne = true; int nrow = matrix.nrow(); int ncol = matrix.ncol(); for (int i = 0; i < nrow && allOne; ++i) for (int j = 0; j < ncol && allOne; ++j) allOne = approxEqual(matrix(i, j), 1); return allOne; } // [[Rcpp::export(.testthatAbsorbingAreRecurrentClassRcpp)]] bool absorbingAreRecurrentClass(CharacterVector absorbingStates, List recurrentClasses) { unordered_set singletonRecurrent; unordered_set absorbing; string current; bool diffEmpty = true; for 
(CharacterVector recClass : recurrentClasses) if (recClass.size() == 1) singletonRecurrent.insert((string) (*recClass.begin())); for (auto state : absorbingStates) absorbing.insert((string) state); for (int i = 0; i < absorbingStates.size() && diffEmpty; ++i) { current = (string) absorbingStates(i); diffEmpty = singletonRecurrent.count(current) > 0; } for (auto it = singletonRecurrent.begin(); it != singletonRecurrent.end() && diffEmpty; ++it) { current = (string) (*it); diffEmpty = absorbing.count(current) > 0; } return diffEmpty; } markovchain/src/Makevars.win0000644000176200001440000000033715137702633015645 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) #CXX_STD = CXX11 PKG_CXXFLAGS += -DRCPP_PARALLEL_USE_TBB=1 PKG_LIBS += $(shell "${R_HOME}/bin${R_ARCH_BIN}/Rscript.exe" \ -e "RcppParallel::RcppParallelLibs()") markovchain/src/fittingFunctions.cpp0000644000176200001440000015221115137702633017415 0ustar liggesusers#ifndef STRICT_R_HEADERS #define STRICT_R_HEADERS // [[Rcpp::depends(RcppParallel)]] // [[Rcpp::depends(RcppArmadillo)]] #include #include #include using namespace Rcpp; using namespace RcppArmadillo; using namespace RcppParallel; using namespace std; #include "helpers.h" #include "mapFitFunctions.h" #include #include // [[Rcpp::export(.markovchainSequenceRcpp)]] CharacterVector markovchainSequenceRcpp(int n, S4 markovchain, CharacterVector t0, bool include_t0 = false) { // character vector to store the result CharacterVector chain(n); // transition mastrix NumericMatrix transitionMatrix = markovchain.slot("transitionMatrix"); // possible states CharacterVector states = markovchain.slot("states"); // current state CharacterVector state = t0; NumericVector rowProbs(states.size()); CharacterVector outstate; for (int i = 0;i < n;i++) { // extracting row probabilties for the given state from transition matrix int row_no = 0; for (int j = 0;j < states.size();j++) { /* last element of state character vector because of markovchainListRcpp, a 
seq of length greater than 1 is also passed whose end state is the beginning state here */ if (states[j] == state[state.size()-1]) { row_no = j; break; } } for (int j = 0; j < states.size(); j++) { rowProbs[j] = transitionMatrix(row_no, j); } // calculate next state outstate = sample(states, 1, false, rowProbs); chain[i] = outstate[0]; state = outstate; } if (include_t0) chain.push_front(t0[0]); return chain; } bool checkSequenceRcpp(List object) { bool out = true; int nob = object.size(); // if there is only one markovchain object return true if (nob == 1) return(true); S4 ob0, ob1; CharacterVector statesN1, statesN, intersection; for (int i = 1; i < nob;i++) { CharacterVector statesNm1; ob0 = S4(object[i-1]); ob1 = S4(object[i]); statesN1 = ob0.slot("states"); statesN = ob1.slot("states"); NumericMatrix matr = ob0.slot("transitionMatrix"); double csum = 0; for (int j = 0;j < matr.ncol();j++) { csum = 0; for (int k = 0;k < matr.nrow();k++) { csum += matr(k, j); } if (csum != 0) statesNm1.push_back(statesN1[j]); } intersection = intersect(statesNm1, statesN); if (not setequal(intersection, statesNm1)) { out = false; break; } } return(out); } // [[Rcpp::export(.markovchainListRcpp)]] List markovchainListRcpp(int n, List object, bool include_t0 = false, CharacterVector t0 = CharacterVector()) { bool verify = checkSequenceRcpp(object); if (not verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!"); } // size of result vector int sz = n*object.size(); if (include_t0) sz += n; int vin = 0; // useful in filling below vectors NumericVector iteration(sz); CharacterVector values(sz); S4 ob(object[0]); CharacterVector sampledValues, newVals; // Initial State selection if not passed //----------------------------------------------------------------------------- CharacterVector ustates = ob.slot("states"); NumericVector rowProbs; for (int i = 0;i < ustates.size();i++) { rowProbs.push_back(1.0 / ustates.size()); } bool 
rselect = (t0.size() == 0); if (rselect) { t0 = sample(ustates, 1, false, rowProbs); } //------------------------------------------------------------------------------ // check whether t0 is in unique states or not for (int i = 0;i < ustates.size();i++) { if (ustates[i] == t0[0]) break; else if (i == ustates.size()-1) stop("Error! Initial state not defined"); } // generate n sequence for (int i = 0;i < n;i++) { // random selection of initial state if not passed to the function if (rselect) { t0 = sample(ustates, 1, false, rowProbs); } sampledValues = markovchainSequenceRcpp(1, object[0], t0, include_t0); if (object.size() > 1) { for (int j = 1;j < object.size();j++) { newVals = markovchainSequenceRcpp(1, object[j], sampledValues); sampledValues.push_back(newVals[0]); } } for (int k = 0;k < sampledValues.size();k++) { iteration[vin] = i + 1; values[vin++] = sampledValues[k]; } } return(List::create(iteration, values)); } struct MCList : public Worker { // 3-D matrix where each slice is a transition matrix const arma::cube mat; // number of transition matrices const int num_mat; // matrix where ith row vector store the list of states // names present in ith transition matrix const vector > names; // vector whose ith element store the dimension of ith // transition matrix const vector size_emat; // whether to include first state const bool include_t0; // info about initial state const bool init; // whether initial state is passed to the method const string init_state; // if yes what's the name // each element of list is a sequence list > output; // constructor for initialization MCList(const arma::cube &pmat, const int &pnum_mat, const vector > &pnames, const vector psize_emat, const bool &pinclude_t0, const bool &pinit, const string &pinit_state) : mat(pmat), num_mat(pnum_mat), names(pnames), size_emat(psize_emat), include_t0(pinclude_t0), init(pinit), init_state(pinit_state) {} MCList(const MCList& mclist, Split) : mat(mclist.mat), num_mat(mclist.num_mat), 
names(mclist.names), size_emat(mclist.size_emat), include_t0(mclist.include_t0), init(mclist.init), init_state(mclist.init_state) {} void operator()(std::size_t begin, std::size_t end) { // to take care of include_t0 unsigned int ci = 0; if (include_t0) ci = 1; // to store single sequence generated each time vector temp(num_mat+ci); // initial probability and states indices arma::vec in_probs(size_emat[0]); arma::vec in_states(size_emat[0]); // assume equal chances of selection of states for the first time for (unsigned int i = 0; i < in_probs.size(); i++) { in_probs[i] = 1.0 / size_emat[0]; in_states[i] = i; } // to store the index of the state selected arma::vec istate; string t0; // every time generate one sequence for (unsigned int p = begin; p < end; p++) { if (not init) { // randomly selected state istate = sample(in_states, 1, false, in_probs); t0 = names[0][istate[0]]; } else { t0 = init_state; } // include the state in the sequence if (include_t0) temp[0] = t0; // to generate one sequence for (unsigned int i = 0; i < (unsigned int)num_mat; i++) { // useful for generating rows probabilty vector unsigned int j = 0; for (j = 0; j < (unsigned int)size_emat[i]; j++) { if (names[i][j] == t0) break; } // vector to be passed to sample method arma::vec probs(size_emat[i]); arma::vec states(size_emat[i]); for (unsigned int k=0; k < probs.size(); k++) { probs[k] = mat(j, k, i); states[k] = k; } // new state selected arma::vec elmt = sample(states, 1, false, probs); t0 = names[i][elmt[0]]; // populate sequence temp[i+ci] = t0; } // insert one sequence to the output output.push_back(temp); } } void join(const MCList& rhs) { // constant iterator to the first element of rhs.output list >::const_iterator it = rhs.output.begin(); // merge the result of two parallel computation for (;it != rhs.output.end();it++) { output.push_back(*it); } } }; // Function to generate a list of sequence of states in parallel from non-homogeneous Markov chains. 
// // Provided any markovchainList object, it returns a list of sequence of states coming // from the underlying stationary distribution. // // @param listObject markovchainList object // @param n Sample size // @param include_t0 Specify if the initial state shall be used // // @return A List // @author Giorgio Spedicato, Deepak Yadav // // @examples // statesNames <- c("a") // mcA <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(1), nrow = 1, byrow = TRUE, // dimnames = list(statesNames, statesNames))) // // statesNames <- c("a","b") // mcB <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(0.5, 0.5, 0.3, 0.7), nrow = 2, byrow = TRUE, // dimnames = list(statesNames, statesNames))) // // statesNames <- c("a","b","c") // mcC <- new("markovchain", states = statesNames, transitionMatrix = // matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, // byrow = TRUE, dimnames = list(statesNames, statesNames))) // // mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) // // markovchainSequenceParallelRcpp(mclist, 99999, TRUE) // // // [[Rcpp::export(.markovchainSequenceParallelRcpp)]] List markovchainSequenceParallelRcpp(S4 listObject, int n, bool include_t0 = false, CharacterVector init_state = CharacterVector()) { // list of markovchain object List object = listObject.slot("markovchains"); bool verify = checkSequenceRcpp(object); if (not verify) { warning("Warning: some states in the markovchain sequences are not contained in the following states!"); } // store number of transition matrices int num_matrix = object.size(); // maximum of dimension of all transition matrices int max_dim_mat = 0; // to store the dimension of each transition matrices vector size_emat(num_matrix); // to store list of states in a transition matrix CharacterVector states; // calculate max_dim_mat and populate size_emat for (int i = 0; i < num_matrix; i++) { // extract ith markovchain object S4 ob = object[i]; // list of 
states in ith markovchain object states = ob.slot("states"); // keep track of maximun dimension if (states.size() > max_dim_mat) max_dim_mat = states.size(); // size of ith transition matrix size_emat[i] = states.size(); } // Matrix with ith row store the states in ith t-matrix vector > names(num_matrix, vector(max_dim_mat)); // to store all t-matrix arma::cube mat(max_dim_mat, max_dim_mat, num_matrix); mat.fill(0); for (int i = 0; i < num_matrix;i++) { // ith markovchain object S4 ob = object[i]; // t-matrix and states names NumericMatrix tmat = ob.slot("transitionMatrix"); CharacterVector stat_names = ob.slot("states"); // populate 3-D matrix for (int j = 0;j < tmat.nrow();j++) { for (int k = 0; k < tmat.ncol();k++) { mat(j, k, i) = tmat(j, k); } // populate names of states names[i][j] = stat_names[j]; } } // initial state is passed or not bool init = false; string ini_state; if (init_state.size() != 0) { init = true; ini_state = as(init_state[0]); } // create an object of MCList class MCList mcList(mat, num_matrix, names, size_emat, include_t0, init, ini_state); // start parallel computation parallelReduce(0, n, mcList); // list of sequences return wrap(mcList.output); } // convert a frequency matrix to a transition probability matrix NumericMatrix _toRowProbs(NumericMatrix x, bool sanitize = false) { int nrow = x.nrow(), ncol = x.ncol(); NumericMatrix out(nrow); for (int i = 0; i < nrow; i++) { double rowSum = 0; for (int j = 0; j < ncol; j++) rowSum += x(i, j); for (int j = 0; j < ncol; j++) { // not updating out(i,j) outside sanitize condition as it may lead to runtime error(rowSum=0) if (sanitize == true) { if (rowSum == 0) { out(i, j) = 1.0/ncol; } else { out(i, j) = x(i, j) / rowSum; } } else { if (rowSum == 0) { out(i, j) = 0; } else { out(i, j) = x(i, j) / rowSum; } } } } out.attr("dimnames") = List::create(rownames(x), colnames(x)); return out; } // Create a frequency matrix //' @rdname markovchainFit //' //' @export // [[Rcpp::export]] NumericMatrix 
createSequenceMatrix(SEXP stringchar, bool toRowProbs = false, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { //--------------------------------------------------------------------- // check whether stringchar is a list or not if (TYPEOF(stringchar) == VECSXP) { List seqs = as(stringchar); CharacterVector pstates; // possiblestates for (int i = 0;i < seqs.size();i++) { CharacterVector tseq = unique(as(seqs[i])); for (int j = 0;j < tseq.size();j++) { if (tseq[j] != "NA") { pstates.push_back(tseq[j]); } } } for (int i = 0;i < possibleStates.size();i++) { pstates.push_back(possibleStates[i]); } pstates = unique(pstates); pstates = pstates.sort(); int sizeMatr = pstates.size(); NumericMatrix freqMatrix(sizeMatr); freqMatrix.attr("dimnames") = List::create(pstates, pstates); for (int i = 0;i < seqs.size();i++) { NumericMatrix temp = createSequenceMatrix(seqs[i], false, false, pstates); freqMatrix += temp; } if (sanitize == true) { for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) rowSum += freqMatrix(i, j); if (rowSum == 0) for (int j = 0; j < sizeMatr; j++) freqMatrix(i, j) = 1; } } if (toRowProbs == true) return _toRowProbs(freqMatrix, sanitize); return freqMatrix; } //--------------------------------------------------------------------- CharacterVector stringChar = as(stringchar); // may include missing values CharacterVector elements_na = unique(union_(stringChar, possibleStates)); // free from missing values CharacterVector elements = clean_nas(elements_na); elements = elements.sort(); int sizeMatr = elements.size(); // output matrix of dimensions equal to total possible states NumericMatrix freqMatrix(sizeMatr); freqMatrix.attr("dimnames") = List::create(elements, elements); CharacterVector rnames = rownames(freqMatrix); if (Rf_isMatrix(stringchar)) { // coerce to CharacterMatrix CharacterMatrix seqMat = as(stringchar); // number of columns must be 2 if (seqMat.ncol() != 2) { stop("Number of 
columns in the matrix must be 2"); } // populate frequency matrix int posFrom = 0, posTo = 0; for (R_xlen_t i = 0; i < seqMat.nrow(); i ++) { if (seqMat(i, 0) != "NA" && seqMat(i, 1) != "NA") { for (int j = 0; j < rnames.size(); j ++) { if (seqMat(i, 0) == rnames[j]) posFrom = j; if (seqMat(i, 1) == rnames[j]) posTo = j; } freqMatrix(posFrom, posTo)++; } } } else { int posFrom = 0, posTo = 0; for (R_xlen_t i = 0; i < stringChar.size() - 1; i ++) { if (stringChar[i] != "NA" && stringChar[i+1] != "NA") { for (int j = 0; j < rnames.size(); j ++) { if (stringChar[i] == rnames[j]) posFrom = j; if (stringChar[i + 1] == rnames[j]) posTo = j; } freqMatrix(posFrom, posTo)++; } } } // sanitizing if any row in the matrix sums to zero by posing the corresponding diagonal equal to 1/dim if (sanitize == true) { for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) rowSum += freqMatrix(i, j); if (rowSum == 0) for (int j = 0; j < sizeMatr; j++) freqMatrix(i, j) = 1; } } if (toRowProbs == true) return _toRowProbs(freqMatrix, sanitize); return (freqMatrix); } // log-likelihood double _loglikelihood(CharacterVector seq, NumericMatrix transMatr) { // to store the result double out = 0; // states names CharacterVector rnames = rownames(transMatr); // caculate out int from = 0, to = 0; for (R_xlen_t i = 0; i < seq.size() - 1; i ++) { if (seq[i] != "NA" && seq[i+1] != "NA") { for (int r = 0; r < rnames.size(); r ++) { if (rnames[r] == seq[i]) from = r; if (rnames[r] == seq[i + 1]) to = r; } out += log(transMatr(from, to)); } } return out; } // [[Rcpp::export(.mcListFitForList)]] List mcListFitForList(List data) { int l = data.size(); // length of list // pair of length and index // length of sequence data[index] vector > length_seq(l); for (int i = 0;i < l;i++) { CharacterVector temp = as(data[i]); length_seq[i] = make_pair(temp.size(), i); } // increasing order of the length of sequence in the list sort(length_seq.begin(), length_seq.end()); int i = 
1; // ith transition int j = 0; // start from least length sequence List out; // to store result while(j < l) { int len = length_seq[j].first; if (i < len) { // transition from (i-1)th to ith CharacterMatrix temp(l-j, 2); // indicates wheter there is a valid transition for the current time of the // markov chain bool validTransition = false; for (int k = j;k < l;k++) { temp(k-j, 0) = (as(data[length_seq[k].second]))[i-1]; temp(k-j, 1) = (as(data[length_seq[k].second]))[i]; if (temp(k-j,0) != "NA" && temp(k-j, 1) != "NA") validTransition = true; } // frequency matrix if (validTransition) out.push_back(createSequenceMatrix(temp, false, true)); i++; } else { j++; } } return out; } List generateCI(double confidencelevel, NumericMatrix freqMatr) { int sizeMatr = freqMatr.nrow(); // the true confidence level is 1-(1-alpha)/2 float true_confidence_level = 1-(1-confidencelevel)/2.0; // transition matrix NumericMatrix initialMatr(sizeMatr, sizeMatr); // calculation of transition matrix // take care of rows with all entries 0 for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j); } // calculate rows probability for (int j = 0; j < sizeMatr; j++) { if (rowSum == 0) { initialMatr(i, j) = 1.0/sizeMatr; } else { initialMatr(i, j) = freqMatr(i, j)/rowSum; } } } // matrices to store end results NumericMatrix lowerEndpointMatr(sizeMatr, sizeMatr); NumericMatrix upperEndpointMatr(sizeMatr, sizeMatr); NumericMatrix standardError(sizeMatr, sizeMatr); // z score for given confidence interval // double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); double zscore = stats::qnorm_0(true_confidence_level, 1.0, 0.0); // populate above defined matrix double marginOfError, lowerEndpoint, upperEndpoint; for (int i = 0; i < sizeMatr; i++) { for (int j = 0; j < sizeMatr; j++) { if (freqMatr(i, j) == 0) { // whether entire ith row is zero or not bool notrans = true; for (int k = 0; k < sizeMatr; k++) { // if the entire ith row is 
not zero then set notrans to false if (freqMatr(i, k) != 0) { standardError(i, j) = lowerEndpointMatr(i, j) = upperEndpointMatr(i, j) = 0; notrans = false; break; } } // if entire ith row is zero if (notrans) standardError(i, j) = lowerEndpointMatr(i, j) = upperEndpointMatr(i, j) = 1; } else { // standard error calculation standardError(i, j) = initialMatr(i, j) / sqrt(freqMatr(i, j)); // marginal error calculation marginOfError = zscore * standardError(i, j); // lower and upper end point calculation lowerEndpoint = initialMatr(i, j) - marginOfError; upperEndpoint = initialMatr(i, j) + marginOfError; // taking care that upper and lower end point should be between 0(included) and 1(included) lowerEndpointMatr(i, j) = (lowerEndpoint > 1.0) ? 1.0 : ((0.0 > lowerEndpoint) ? 0.0 : lowerEndpoint); upperEndpointMatr(i, j) = (upperEndpoint > 1.0) ? 1.0 : ((0.0 > upperEndpoint) ? 0.0 : upperEndpoint); } } } // set the rows and columns name as states names standardError.attr("dimnames") = upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = freqMatr.attr("dimnames"); return List::create(_["standardError"] = standardError, _["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr); } // Fit DTMC using MLE List _mcFitMle(SEXP data, bool byrow, double confidencelevel, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { NumericMatrix freqMatr = createSequenceMatrix(data, false, false, possibleStates); // matrix size = nrows = ncols int sizeMatr = freqMatr.nrow(); // initial matrix = transition matrix NumericMatrix initialMatr(sizeMatr); // set names of states as rows name and columns name initialMatr.attr("dimnames") = freqMatr.attr("dimnames"); // take care of rows with all entries 0 for (int i = 0; i < sizeMatr; i++) { double rowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j); } // calculate rows probability for (int j = 0; j < sizeMatr; 
j++) { if (rowSum == 0) { initialMatr(i, j) = (sanitize ? 1.0/sizeMatr : 0); } else { initialMatr(i, j) = freqMatr(i, j)/rowSum; } } } // transpose the matrix if byrow is false if (byrow == false) { initialMatr = transposeMatrix(initialMatr); } // create markov chain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = initialMatr; outMc.slot("name") = "MLE Fit"; List CI = generateCI(confidencelevel, freqMatr); // return a list of important results return List::create(_["estimate"] = outMc, _["standardError"] = CI[0], _["confidenceLevel"] = CI[1], _["lowerEndpointMatrix"] = CI[2], _["upperEndpointMatrix"] = CI[3] ); } // Fit DTMC using Laplacian smooth List _mcFitLaplacianSmooth(CharacterVector stringchar, bool byrow, double laplacian = 0.01, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // create frequency matrix NumericMatrix origNum = createSequenceMatrix(stringchar, false, sanitize, possibleStates); // store dimension of frequency matrix int nRows = origNum.nrow(), nCols = origNum.ncol(); // convert frequency matrix to transition matrix for (int i = 0; i < nRows; i ++) { double rowSum = 0; // add laplacian correction to each entry // also calculate row's sum for (int j = 0; j < nCols; j ++) { origNum(i,j) += laplacian; rowSum += origNum(i,j); } // get a transition matrix and a DTMC for (int j = 0; j < nCols; j ++) { if (rowSum == 0) origNum(i,j) = sanitize ? 
origNum(i,j)/rowSum : 0; else origNum(i,j) = origNum(i,j)/rowSum; } } // transpose transition matrix = columnwise storage if (byrow == false) { origNum = transposeMatrix(origNum); } // create markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = origNum; outMc.slot("name") = "Laplacian Smooth Fit"; return List::create(_["estimate"] = outMc); } // bootstrap a sequence to produce a list of sample sequences List _bootstrapCharacterSequences(CharacterVector stringchar, int n, R_xlen_t size = -1, CharacterVector possibleStates = CharacterVector()) { // store length of sequence if (size == -1) { size = stringchar.size(); } // frequency matrix NumericMatrix contingencyMatrix = createSequenceMatrix(stringchar, true, true, possibleStates); // many samples from a given a sequence :: bootstrap // res list is helper list List samples, res; // state names CharacterVector itemset = rownames(contingencyMatrix); // number of distinct states int itemsetsize = itemset.size(); // access R sample function Function sample("sample"); for (int i = 0; i < n; i ++) { // charseq to store a fresh sequence CharacterVector charseq, resvec; // select a random number between 0 ans itemsize int rnd = (int)(runif (1)(0) * itemsetsize); // random state String ch = itemset[rnd]; // push start state to charseq charseq.push_back(ch); for (R_xlen_t j = 1; j < size; j ++) { // store row probability NumericVector probsVector; // populate probsVector for (int k = 0; k < itemsetsize; k ++) { if ((std::string)itemset[k] == (std::string) ch) { probsVector = contingencyMatrix(k, _); break; } } // select next state of sequence res = sample(itemset, 1, true, probsVector); resvec = res[0]; // current state ch = resvec[0]; charseq.push_back(ch); } // every add one sequence samples.push_back(charseq); } // return a list of n sequence of same length as of given sequence return samples; } // estimate from the list of bootstrapped matrices List _fromBoot2Estimate(List listMatr) { // number of 
transition matrices int sampleSize = listMatr.size(); // first transition matrix NumericMatrix firstMat = listMatr[0]; // dimension of matrix int matrDim = firstMat.nrow(); // matrix to store mean and standard deviation NumericMatrix matrMean(matrDim), matrSd(matrDim); // populate mean and sd matrix for (int i = 0; i < matrDim; i ++) { for (int j = 0; j < matrDim; j ++) { NumericVector probsEstimated; for (int k = 0; k < sampleSize; k ++) { NumericMatrix mat = listMatr[k]; probsEstimated.push_back(mat(i,j)); } matrMean(i, j) = mean(probsEstimated); matrSd(i, j) = Rcpp::sd(probsEstimated); } } // set rows and columns names = states names matrMean.attr("dimnames") = List::create(rownames(firstMat), colnames(firstMat)); matrSd.attr("dimnames") = matrMean.attr("dimnames"); // return list of estimated mean transition matrix and estimated sd matrix return List::create(_["estMu"] = matrMean, _["estSigma"] = matrSd); } struct BootstrapList : public Worker { // transition matrix const RMatrix input; // unique states const vector states; // length of sequence const int len; // list of new sequences list > output; // constructor BootstrapList(const NumericMatrix input, const vector states, const int len) : input(input), states(states), len(len) {} BootstrapList(const BootstrapList& bsList, Split) : input(bsList.input), states(bsList.states), len(bsList.len) {} // generate (end-begin) sequences void operator()(std::size_t begin, std::size_t end) { // number of unique states unsigned int n = states.size(); // initial probability vector arma::vec iprobs(n); // probability vector (can be any row of transition matrix) arma::vec probs(n); // unique states indices arma::vec ustates(n); // initialization for (unsigned int i = 0;i < n;i++) { iprobs[i] = 1.0/n; ustates[i] = i; } // to store new state generated arma::vec istate; // every time generate one sequence for (unsigned int p = begin; p < (unsigned int)end;p++) { // randomly select starting state vector result(len); istate = 
sample(ustates, 1, false, iprobs); result[0] = states[istate[0]]; // given a present state generate a future state for (unsigned int j = 1; j < (unsigned int)len;j++) { // row vector corresponding to state istate[0] for (unsigned int k = 0;k < (unsigned int)n;k++) { probs[k] = input(istate[0], k); } // select future state istate = sample(ustates, 1, false, probs); result[j] = states[istate[0]]; } // populate a sequence output.push_back(result); } } void join(const BootstrapList& rhs) { // constant iterator to the first element of rhs.output list >::const_iterator it = rhs.output.begin(); // merge the result of two parallel computation for (;it != rhs.output.end();it++) { output.push_back(*it); } } }; List _bootstrapCharacterSequencesParallel(CharacterVector stringchar, int n, R_xlen_t size = -1, CharacterVector possibleStates = CharacterVector()) { // store length of sequence if (size == -1) { size = stringchar.size(); } // frequency matrix NumericMatrix contingencyMatrix = createSequenceMatrix(stringchar, true, true, possibleStates); // state names vector itemset = as >(rownames(contingencyMatrix)); // number of distinct states // int itemsetsize = itemset.size(); BootstrapList bsList(contingencyMatrix, itemset, size); parallelReduce(0, n, bsList); return wrap(bsList.output); } // Fit DTMC using bootstrap method List _mcFitBootStrap(CharacterVector data, int nboot, bool byrow, bool parallel, double confidencelevel, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // list of sequence generated using given sequence List theList = (parallel) ? 
_bootstrapCharacterSequencesParallel(data, nboot, data.size()) : _bootstrapCharacterSequences(data, nboot, data.size()); // number of new sequence int n = theList.size(); // to store frequency matrix for every sequence List pmsBootStrapped(n); // populate pmsBootStrapped if (parallel) for (int i = 0; i < n; i++) pmsBootStrapped[i] = createSequenceMatrix(theList[i], true, sanitize, possibleStates); else for (int i = 0; i < n; i++) pmsBootStrapped[i] = createSequenceMatrix(theList[i], true, sanitize, possibleStates); List estimateList = _fromBoot2Estimate(pmsBootStrapped); // transition matrix NumericMatrix transMatr = _toRowProbs(estimateList["estMu"], sanitize); // markovchain object S4 estimate("markovchain"); estimate.slot("transitionMatrix") = transMatr; estimate.slot("byrow") = byrow; estimate.slot("name") = "BootStrap Estimate"; // z score for given confidence interval double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); // store dimension of matrix int nrows = transMatr.nrow(); int ncols = transMatr.ncol(); // matrix to store end results NumericMatrix lowerEndpointMatr(nrows, ncols), upperEndpointMatr(nrows, ncols); NumericMatrix sigma = estimateList["estSigma"], standardError(nrows, ncols); // populate above defined matrix double marginOfError, lowerEndpoint, upperEndpoint; for (int i = 0; i < nrows; i ++) { for (int j = 0; j < ncols; j ++) { // standard error calculation standardError(i, j) = sigma(i, j) / sqrt(double(n)); // marginal error calculation marginOfError = zscore * standardError(i, j); // lower and upper end point calculation lowerEndpoint = transMatr(i, j) - marginOfError; upperEndpoint = transMatr(i, j) + marginOfError; // taking care that upper and lower end point should be between 0(included) and 1(included) lowerEndpointMatr(i, j) = (lowerEndpoint > 1.0) ? 1.0 : ((0.0 > lowerEndpoint) ? 0.0 : lowerEndpoint); upperEndpointMatr(i, j) = (upperEndpoint > 1.0) ? 1.0 : ((0.0 > upperEndpoint) ? 
0.0 : upperEndpoint); } } // set the rows and columns name as states names standardError.attr("dimnames") = upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = transMatr.attr("dimnames"); // return a list of important results List out = List::create(_["estimate"] = estimate, _["standardError"] = standardError, _["confidenceInterval"] = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr), _["bootStrapSamples"] = pmsBootStrapped ); return out; } // convert matrix data to transition probability matrix // [[Rcpp::export(.matr2Mc)]] S4 _matr2Mc(CharacterMatrix matrData, double laplacian = 0, bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { // dimension of input matrix R_xlen_t nRows = matrData.nrow(), nCols = matrData.ncol(); // set of states std::set uniqueVals; // populate uniqueVals set for (R_xlen_t i = 0; i < nRows; i++) for (R_xlen_t j = 0; j < nCols; j++) { if (matrData(i,j) != "NA") uniqueVals.insert((std::string)matrData(i, j)); } for (int i = 0;i < possibleStates.size();i++) { uniqueVals.insert((std::string)possibleStates[i]); } // unique states int usize = uniqueVals.size(); // matrix of dimension usize NumericMatrix contingencyMatrix (usize); // state names as rows name and columns name contingencyMatrix.attr("dimnames") = List::create(uniqueVals, uniqueVals); // iterator for set of states std::set::iterator it; // populate contingency matrix int stateBegin = 0, stateEnd = 0; for (R_xlen_t i = 0; i < nRows; i ++) { for (R_xlen_t j = 1; j < nCols; j ++) { if (matrData(i,j-1) != "NA" && matrData(i,j) != "NA") { // row and column number of begin state and end state int k = 0; for (it = uniqueVals.begin(); it != uniqueVals.end(); ++it, k++) { if (*it == (std::string)matrData(i, j-1)) { stateBegin = k; } if (*it == (std::string)matrData(i,j)) { stateEnd = k; } } contingencyMatrix(stateBegin,stateEnd)++; } } } // add 
laplacian correction if needed for (int i = 0; i < usize; i++) { double rowSum = 0; for (int j = 0; j < usize; j++) { contingencyMatrix(i,j) += laplacian; rowSum += contingencyMatrix(i, j); } // get the transition matrix and a DTMC for (int j = 0; j < usize; j ++) { if (sanitize == true) { if (rowSum == 0) { contingencyMatrix(i,j) = 1.0/usize; } else { contingencyMatrix(i,j) /= rowSum; } } else { if (rowSum == 0) { contingencyMatrix(i,j) = 0; } else { contingencyMatrix(i,j) /= rowSum; } } } } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = contingencyMatrix; return(outMc); } // convert matrix data to transition probability matrix // [[Rcpp::export(.list2Mc)]] S4 _list2Mc(List data, double laplacian = 0, bool sanitize = false) { // set of states std::set uniqueVals; // populate uniqueVals set for (R_xlen_t i = 0; i < data.size(); i++) { CharacterVector temp = as(data[i]); for (R_xlen_t j = 0; j < temp.size(); j++) { uniqueVals.insert((std::string)temp[j]); } } // unique states int usize = uniqueVals.size(); // matrix of dimension usize NumericMatrix contingencyMatrix (usize); // state names as rows name and columns name contingencyMatrix.attr("dimnames") = List::create(uniqueVals, uniqueVals); // iterator for set of states std::set::iterator it; // populate contingency matrix int stateBegin = 0, stateEnd = 0; for (R_xlen_t i = 0; i < data.size(); i ++) { CharacterVector temp = as(data[i]); for (R_xlen_t j = 1; j < temp.size(); j ++) { // row and column number of begin state and end state int k = 0; for (it = uniqueVals.begin(); it != uniqueVals.end(); ++it, k++) { if (*it == (std::string)temp[j-1]) { stateBegin = k; } if (*it == (std::string)temp[j]) { stateEnd = k; } } contingencyMatrix(stateBegin,stateEnd)++; } } // add laplacian correction if needed for (int i = 0; i < usize; i++) { double rowSum = 0; for (int j = 0; j < usize; j++) { contingencyMatrix(i,j) += laplacian; rowSum += contingencyMatrix(i, j); } // get the transition 
matrix and a DTMC for (int j = 0; j < usize; j ++) { if (sanitize == true) { if (rowSum == 0) { contingencyMatrix(i,j) = 1.0/usize; } else { contingencyMatrix(i,j) /= rowSum; } } else { if (rowSum == 0) { contingencyMatrix(i,j) = 0; } else { contingencyMatrix(i,j) /= rowSum; } } } } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = contingencyMatrix; return(outMc); } //' @name inferHyperparam //' @title Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set //' @description Since the Bayesian inference approach implemented in the package is based on conjugate priors, //' hyperparameters must be provided to model the prior probability distribution of the chain //' parameters. The hyperparameters are inferred from a given a priori matrix under the assumption //' that the matrix provided corresponds to the mean (expected) values of the chain parameters. A //' scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred //' from a data set. //' //' @param transMatr A valid transition matrix, with dimension names. //' @param scale A vector of scaling factors, each element corresponds to the row names of the provided transition //' matrix transMatr, in the same order. //' @param data A data set from which the hyperparameters are inferred. //' //' @details transMatr and scale need not be provided if data is provided. //' @return Returns the hyperparameter matrix in a list. //' //' @note The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, //' and the elements in the matrix are correspondingly permuted. //' //' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R //' package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi, Giorgio Spedicato //' @seealso \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} //' //' @examples //' data(rain, package = "markovchain") //' inferHyperparam(data = rain$rain) //' //' weatherStates <- c("sunny", "cloudy", "rain") //' weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, //' 0.3, 0.4, 0.3, //' 0.2, 0.4, 0.4), //' byrow = TRUE, nrow = 3, //' dimnames = list(weatherStates, weatherStates)) //' inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) //' //' @export //' // [[Rcpp::export]] List inferHyperparam(NumericMatrix transMatr = NumericMatrix(), NumericVector scale = NumericVector(), CharacterVector data = CharacterVector()) { // stop if there is only one element in the matrix and size of data sequence is zero if (transMatr.nrow() * transMatr.ncol() == 1 && data.size() == 0) stop("Provide the prior transition matrix or the data set in order to infer the hyperparameters"); // to store final result List out; // Number of elements are greater than 1 if (transMatr.nrow() * transMatr.ncol() != 1) { if (scale.size() == 0) { stop("Provide a non-zero scaling factor vector to infer integer hyperparameters"); } // --------begin validity checks for the transition matrix--------- if (transMatr.nrow() != transMatr.ncol()) { stop("Transition matrix dimensions are inconsistent"); } // number of rows in transition matrix int sizeMatr = transMatr.nrow(); // if any element is greater than 1 or less than 0 then raise error // sum of each rows must lie between 1 - eps and 1 + eps for (int i = 0; i < sizeMatr; i++) { double rowSum = 0., eps = 1e-10; for (int j = 0; j < sizeMatr; j++) { if (transMatr(i, j) < 0. || transMatr(i, j) > 1.) stop("The entries in the transition matrix must each belong to the interval [0, 1]"); else rowSum += transMatr(i, j); } if (rowSum <= 1. - eps || rowSum >= 1. 
+ eps) stop("Each rows of the transition matrix must sum to 1"); } // rows and columns name of transition matrix List dimNames = transMatr.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; // sorted rows and columns names CharacterVector sortedColNames(sizeMatr), sortedRowNames(sizeMatr); for (int i = 0; i < sizeMatr; i++) { sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); } sortedColNames.sort(); sortedRowNames.sort(); // rows names vector and columns name vector must be same // and no names in names vectors should be same for (int i = 0; i < sizeMatr; i++) { if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); } // --------end of validity checks for the transition matrix--------- // --------beginning of validity checks for the scale factor vector--------- // length of scale vector must be equal to number of rows in transition matrix if (scale.size() != sizeMatr) stop("The dimensions of the scale vector must match the number of states in the chain"); // if any value in the scale vector is zero for (int i = 0; i < sizeMatr; i++) { if (scale(i) == 0) stop("The scaling factors must be non-zero!"); } // --------end of validity checks for the scale factor vector--------- // Creation of output matrix i.e. 
hyper param matrix NumericMatrix hpScaled(sizeMatr); hpScaled.attr("dimnames") = List::create(rowNames, colNames); // populate hyper param matrix for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) hpScaled(i, j) = scale(i) * transMatr(i, j); /* shift rows and columns so that names of rows and columns names will be in sorted order */ hpScaled = sortByDimNames(hpScaled); // store list of hyper param scaled matrix out = List::create(_["scaledInference"] = hpScaled); } else if (data.size() != 0) { // to store unique states in sorted order CharacterVector elements = data; for (int i = 0; i < data.size(); i++) elements.push_back(data[i]); elements = unique(elements).sort(); // size of hyperparam matrix int sizeMatr = elements.size(); // create hyperparam matrix NumericMatrix hpData(sizeMatr); hpData.attr("dimnames") = List::create(elements, elements); std::fill(hpData.begin(), hpData.end(), 1); // populate hyper param matrix int posFrom = 0, posTo = 0; for (R_xlen_t i = 0; i < data.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (data[i] == elements[j]) posFrom = j; if (data[i + 1] == elements[j]) posTo = j; } hpData(posFrom,posTo)++; } // ouput data out = List::create(_["dataInference"] = hpData); } return out; } //' @name markovchainFit //' @title Function to fit a discrete Markov chain //' @description Given a sequence of states arising from a stationary state, //' it fits the underlying Markov chain distribution using either MLE (also using a //' Laplacian smoother), bootstrap or by MAP (Bayesian) inference. //' //' @param data It can be a character vector or a \deqn{n x n} matrix or a \deqn{n x n} data frame or a list //' @param method Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace" //' @param byrow it tells whether the output Markov chain should show the transition probabilities by row. //' @param nboot Number of bootstrap replicates in case "bootstrap" is used. 
//' @param laplacian Laplacian smoothing parameter, default zero. It is only used when "laplace" method //' is chosen. //' @param name Optional character for name slot. //' @param parallel Use parallel processing when performing Boostrap estimates. //' @param confidencelevel \deqn{\alpha} level for conficence intervals width. //' Used only when \code{method} equal to "mle". //' @param confint a boolean to decide whether to compute Confidence Interval or not. //' @param hyperparam Hyperparameter matrix for the a priori distribution. If none is provided, //' default value of 1 is assigned to each parameter. This must be of size //' \deqn{k x k} where k is the number of states in the chain and the values //' should typically be non-negative integers. //' @param stringchar It can be a \deqn{n x n} matrix or a character vector or a list //' @param toRowProbs converts a sequence matrix into a probability matrix //' @param sanitize put 1 in all rows having rowSum equal to zero //' @param possibleStates Possible states which are not present in the given sequence //' //' @details Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} //' contain \code{NAs}, the related \code{NA} containing transitions will be ignored. //' //' @return A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix //' of standards deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method //' is used, the lower and upper confidence bounds are returned along with the standard error. //' The "map" method also returns the expected value of the parameters with respect to the //' posterior distribution. //' @references A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 //' //' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, //' and Out-of-Class Modeling, Christopher C. Strelioff, James P. 
Crutchfield, //' Alfred Hubler, Santa Fe Institute //' //' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R //' package version 0.2.5 //' //' @author Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi //' @note This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". //' In addition, parallel facility is not complete, involving only a part of the bootstrap process. //' When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is //' currently available. //' //' @seealso \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} //' @examples //' sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", //' "b", "b", "b", "a") //' sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) //' mcFitMLE <- markovchainFit(data = sequence) //' mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") //' //' na.sequence <- c("a", NA, "a", "b") //' # There will be only a (a,b) transition //' na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) //' mcFitMLE <- markovchainFit(data = na.sequence) //' //' # data can be a list of character vectors //' sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) //' mcFitMap <- markovchainFit(sequences, method = "map") //' mcFitMle <- markovchainFit(sequences, method = "mle") //' @rdname markovchainFit //' //' @export //' // [[Rcpp::export]] List markovchainFit(SEXP data, String method = "mle", bool byrow = true, int nboot = 10, double laplacian = 0, String name = "", bool parallel = false, double confidencelevel = 0.95, bool confint = true, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { if (method != "mle" && method != "bootstrap" && method != "map" && method != "laplace") { stop ("method should be one of \"mle\", \"bootsrap\", \"map\" 
or \"laplace\""); } // list to store the output List out; // if input data is data frame or matrix if (Rf_inherits(data, "data.frame") || Rf_isMatrix(data)) { // store input data in mat CharacterMatrix mat; // if data is a data.frame force it to matrix if (Rf_inherits(data, "data.frame")) { DataFrame df(data); // matrix : no of rows = no of rows in df : same for number of columns mat = CharacterMatrix(df.nrows(), df.size()); for (R_xlen_t i = 0; i < df.size(); i++) mat(_,i) = CharacterVector(df[i]); } else { mat = data; } // byrow assumes distinct observations (trajectiories) are per row // otherwise transpose if (!byrow) mat = transposeMatrix(mat); S4 outMc = _matr2Mc(mat, laplacian, sanitize, possibleStates); // whether to compute confidence interval or not if (confint) { // convert matrix to list int nrows = mat.nrow(); List manyseq(nrows); for (int i = 0; i < nrows; i++) manyseq[i] = mat(i, _); out = _mcFitMle(manyseq, byrow, confidencelevel, sanitize, possibleStates); out[0] = outMc; } else { out = List::create(_["estimate"] = outMc); } } else if (TYPEOF(data) == VECSXP) { if (method == "mle") { out = _mcFitMle(data, byrow, confidencelevel, sanitize, possibleStates); } else if (method == "map") { out = _mcFitMap(data, byrow, confidencelevel, hyperparam, sanitize, possibleStates); } else stop("method not available for a list"); } else { if (method == "mle") { out = _mcFitMle(data, byrow, confidencelevel, sanitize, possibleStates); } else if (method == "bootstrap") { out = _mcFitBootStrap(data, nboot, byrow, parallel, confidencelevel, sanitize, possibleStates); } else if (method == "laplace") { out = _mcFitLaplacianSmooth(data, byrow, laplacian, sanitize, possibleStates); } else if (method == "map") { out = _mcFitMap(data, byrow, confidencelevel, hyperparam, sanitize, possibleStates); } } // markovchain object S4 estimate = out["estimate"]; if (name != "") { estimate.slot("name") = name; } // transition matrix NumericMatrix transMatr = 
estimate.slot("transitionMatrix"); // data is neither data frame nor matrix if (!Rf_inherits(data, "data.frame") && !Rf_isMatrix(data) && TYPEOF(data) != VECSXP) out["logLikelihood"] = _loglikelihood(data, transMatr); estimate.slot("states") = rownames(transMatr); out["estimate"] = estimate; return out; } // [[Rcpp::export(.noofVisitsDistRCpp)]] NumericVector noofVisitsDistRCpp(NumericMatrix matrix, int i,int N) { // no of states in the process int noOfStates = matrix.ncol(); arma::vec out = arma::zeros(noOfStates); arma::mat Tmatrix = as(matrix); arma::mat temp = Tmatrix; // initial distribution is in the transition matrix itself for (int j = 0; j < noOfStates; j++) out[j] = Tmatrix(i - 1, j); //The distribution for Nth step is calculates after N multiplication of the transition matrix // and adding everytime the coresponding ratios // finally dividing by N for (int p = 0; p < N - 1; p++) { temp = temp * Tmatrix; for (int j = 0; j < noOfStates; j++) out[j] += temp(i - 1, j); } out = out/N; NumericVector R = wrap(out); return R; } #endif markovchain/src/ctmcClassesAndMethods.cpp0000644000176200001440000000304615137702633020274 0ustar liggesusers#include using namespace Rcpp; //' @name generatorToTransitionMatrix //' @title Function to obtain the transition matrix from the generator //' @description The transition matrix of the embedded DTMC is inferred from the CTMC's generator //' //' @usage generatorToTransitionMatrix(gen, byrow = TRUE) //' //' @param gen The generator matrix //' @param byrow Flag to determine if rows (columns) sum to 0 //' @return Returns the transition matrix. //' //' @references //' Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. 
//' Anderson, University of Wisconsin at Madison //' //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{rctmc}},\code{\link{ctmc-class}} //' @examples //' energyStates <- c("sigma", "sigma_star") //' byRow <- TRUE //' gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, //' byrow = byRow, dimnames = list(energyStates, energyStates)) //' generatorToTransitionMatrix(gen) //' //' @export // [[Rcpp::export]] NumericMatrix generatorToTransitionMatrix(NumericMatrix gen, bool byrow = true){ NumericMatrix transMatr(gen.nrow()); transMatr.attr("dimnames") = gen.attr("dimnames"); if (byrow) { for (int i = 0; i < gen.nrow(); i++){ for (int j = 0; j < gen.ncol(); j++){ if (i != j) transMatr(i, j) = -gen(i, j) / gen(i, i); } } } else { for (int j = 0; j < gen.ncol(); j++){ for (int i = 0; i < gen.nrow(); i++){ if (i != j) transMatr(i, j) = -gen(i, j) / gen(j, j); } } } return transMatr; } markovchain/src/mapFitFunctions.h0000644000176200001440000001732715137702633016646 0ustar liggesusers/* * Function to remove NA values from a vector */ CharacterVector clean_nas(CharacterVector elements_na){ CharacterVector elements; for(int i = 0; i < elements_na.size();i++) if(elements_na[i] != "NA") elements.push_back(elements_na[i]); return elements; } List _mcFitMap(SEXP data, bool byrow, double confidencelevel, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()) { if(TYPEOF(data) != VECSXP) { data = List::create(as(data)); } List seqs = as(data); CharacterVector elements; for(int i = 0;i < (int)seqs.size();i++) { CharacterVector tseq = unique(as(seqs[i])); for(int j = 0;j < (int)tseq.size();j++) { if(tseq[j] != "NA") { elements.push_back(tseq[j]); } } } elements = unique(union_(elements, possibleStates)).sort(); // number of unique states int sizeMatr = elements.size(); // if no hyperparam argument provided, use default value of 1 for all if(hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { // matrix with all 
entries 1 NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(elements, elements); for(int i = 0; i < sizeMatr; i++) for(int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } //-----------beginning of validity checking of hyperparam matrix---------------------- // validity check for hyperparam matrix if(hyperparam.nrow() != hyperparam.ncol()) { stop("Dimensions of the hyperparameter matrix are inconsistent"); } if(hyperparam.nrow() < sizeMatr) { stop("Hyperparameters for all state transitions must be provided"); } // extract rows and columns name out of hyperparam matrix List dimNames = hyperparam.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; // size of hyperparam matrix int sizeHyperparam = hyperparam.ncol(); // sorted order of hyperparam rows and columns name CharacterVector sortedColNames(sizeHyperparam), sortedRowNames(sizeHyperparam); for(int i = 0; i < sizeHyperparam; i++) { sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); } sortedColNames.sort(); sortedRowNames.sort(); // validity of hyperparam matrix for(int i = 0; i < sizeHyperparam; i++){ // columns names must be different // rows names must be different if(i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) { stop("The states must all be unique"); } // same states should be present in rows and columns else if(sortedColNames(i) != sortedRowNames(i)) { stop("The set of row names must be the same as the set of column names"); } // chech whether any state in column names exists which is not in the given sequence bool found = false; for(int j = 0; j < sizeMatr; j++) { if(elements(j) == sortedColNames(i)) { found = true; break; } } // hyperparam may contain states not in stringchar if(!found) { elements.push_back(sortedColNames(i)); } } // check for the case where hyperparam has missing data for(int i = 0; i < sizeMatr; i++){ bool found = false; for(int j = 0; 
j < sizeHyperparam; j++) { if(sortedColNames(j) == elements(i)) { found = true; break; } } if(!found) stop("Hyperparameters for all state transitions must be provided"); } elements = elements.sort(); sizeMatr = elements.size(); for(int i = 0; i < sizeMatr; i++) for(int j = 0; j < sizeMatr; j++) if(hyperparam(i, j) < 1.) stop("The hyperparameter elements must all be greater than or equal to 1"); //-----------end of validity checking of hyperparam matrix---------------------- // permute the elements of hyperparam such that the row, column names are sorted hyperparam = sortByDimNames(hyperparam); // helper matrices which will help in the calculation of transition matrix // other matrices will be returned as a result NumericMatrix mapEstMatr(sizeMatr), expMatr(sizeMatr); NumericMatrix freqMatr(sizeMatr); mapEstMatr.attr("dimnames") = List::create(elements, elements); expMatr.attr("dimnames") = List::create(elements, elements); // matrices to be returned NumericMatrix lowerEndpointMatr = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); NumericMatrix upperEndpointMatr = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); NumericMatrix stdError = NumericMatrix(mapEstMatr.nrow(), mapEstMatr.ncol()); // populate frequeny matrix for old data; this is used for inference for(int k = 0;k < seqs.size();k++) { CharacterVector stringchar = as(seqs[k]); int posFrom = 0, posTo = 0; for(R_xlen_t i = 0; i < stringchar.size() - 1; i ++) { if(stringchar[i] != "NA" && stringchar[i+1] != "NA"){ for (int j = 0; j < sizeMatr; j ++) { if(stringchar[i] == elements[j]) posFrom = j; if(stringchar[i + 1] == elements[j]) posTo = j; } freqMatr(posFrom,posTo)++; } } } // sanitize and to row probs for (int i = 0; i < sizeMatr; i++) { // rowsum of frequency matrix and hyperparam matrix double rowSum = 0, paramRowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j), paramRowSum += hyperparam(i, j); } // toRowProbs for (int j = 0; j < sizeMatr; j++) { // confidence intervals and 
bounds double p = freqMatr(i, j) + hyperparam(i, j), q = rowSum + paramRowSum - freqMatr(i, j) - hyperparam(i, j); // expected value of the transition parameters expMatr(i, j) = p / (p + q); if(p + q == sizeMatr) { mapEstMatr(i, j) = (sanitize ? 1.0 / sizeMatr : 0); } else { // maximum a posteriori estimate mapEstMatr(i, j) = (p - 1) / (p + q - sizeMatr); } // populate lowerEndPoint, upperEndPoint and stand error matrices double beta = lbeta(p, q); double cdf = betain(double(mapEstMatr(i, j)), p, q, beta); if(cdf + confidencelevel / 2 > 1.) { upperEndpointMatr(i, j) = 1.; lowerEndpointMatr(i, j) = xinbta(p, q, beta, 1 - confidencelevel); } else if(cdf - confidencelevel / 2 < 0.) { lowerEndpointMatr(i, j) = 0.; upperEndpointMatr(i, j) = xinbta(p, q, beta, confidencelevel); } else { lowerEndpointMatr(i, j) = xinbta(p, q, beta, cdf - confidencelevel / 2); upperEndpointMatr(i, j) = xinbta(p, q, beta, cdf + confidencelevel / 2); } stdError(i, j) = sqrt(p * q / (p + q) / (p + q) / (1 + p + q)); } } // transpose the matrix if columwise result is required if(byrow == false) { mapEstMatr = transposeMatrix(mapEstMatr); } // markovchain object S4 outMc("markovchain"); outMc.slot("transitionMatrix") = mapEstMatr; outMc.slot("name") = "Bayesian Fit"; // message("\n\'estimate\' is the MAP set of parameters where as \'expectedValue\' \nis the expectation // of the parameters with respect to the posterior.\nThe confidence intervals are given for \'estimate\'."); return List::create(_["estimate"] = outMc, _["expectedValue"] = expMatr, _["standardError"] = stdError, _["confidenceInterval"] = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr)); } markovchain/src/ctmcFittingFunctions.cpp0000644000176200001440000001057115137702633020226 0ustar liggesusers#include #include using namespace Rcpp; #include List markovchainFit(SEXP data, String method = "mle", bool byrow = true, int nboot = 10, 
double laplacian = 0, String name = "", bool parallel = false, double confidencelevel = 0.95, bool confint = true, NumericMatrix hyperparam = NumericMatrix(), bool sanitize = false, CharacterVector possibleStates = CharacterVector()); //' @name ctmcFit //' @title Function to fit a CTMC //' @description This function fits the underlying CTMC give the state //' transition data and the transition times using the maximum likelihood //' method (MLE) //' @usage ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) //' @param data It is a list of two elements. The first element is a character //' vector denoting the states. The second is a numeric vector denoting the //' corresponding transition times. //' @param byrow Determines if the output transition probabilities of the //' underlying embedded DTMC are by row. //' @param name Optional name for the CTMC. //' @param confidencelevel Confidence level for the confidence interval //' construnction. //' @return It returns a list containing the CTMC object and the confidence intervals. //' //' @details Note that in data, there must exist an element wise corresponding //' between the two elements of the list and that data[[2]][1] is always 0. 
//' @references Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{rctmc}} //' //' @examples //' data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) //' ctmcFit(data) //' //' @export //' // [[Rcpp::export]] List ctmcFit(List data, bool byrow=true, String name="", double confidencelevel = 0.95) { CharacterVector stateData(as(data[0]).size()); for (int i = 0; i < as(data[0]).size(); i++) stateData[i] = as(data[0])[i]; NumericVector transData = data[1]; CharacterVector sortedStates = unique(as(data[0])).sort(); NumericVector stateCount(sortedStates.size()); NumericVector stateSojournTime(sortedStates.size()); List dtmcData = markovchainFit(stateData, "mle", byrow, 10, 0, name, false, confidencelevel); for (int i = 0; i < stateData.size() - 1; i++){ int idx = std::find(sortedStates.begin(), sortedStates.end(), stateData[i]) - sortedStates.begin(); stateCount[idx]++; stateSojournTime[idx] += transData[i+1] - transData[i]; } S4 dtmcEst = dtmcData["estimate"]; NumericMatrix gen = dtmcEst.slot("transitionMatrix"); for (int i = 0; i < gen.nrow(); i++){ for (int j = 0; j < gen.ncol(); j++){ if (stateCount[i] > 0) gen(i, j) *= stateCount[i] / stateSojournTime[i]; } if (stateCount[i] > 0) gen(i, i) = - stateCount[i] / stateSojournTime[i]; else gen(i, i) = -1; } double zscore = stats::qnorm_0(confidencelevel, 1.0, 0.0); NumericVector lowerConfVecLambda(sortedStates.size()), upperConfVecLambda(sortedStates.size()); for (int i = 0; i < sortedStates.size(); i++){ if (stateCount[i] > 0){ auto factor = stateCount[i] / stateSojournTime[i] * (1 - zscore / sqrt(stateCount[i])); lowerConfVecLambda(i) = std::max(0., factor); upperConfVecLambda(i) = std::min(1., factor); } else { lowerConfVecLambda(i) = 1; upperConfVecLambda(i) = 1; } } S4 outCtmc("ctmc"); outCtmc.slot("states") = sortedStates; outCtmc.slot("generator") = gen; 
outCtmc.slot("name") = name; return List::create(_["estimate"] = outCtmc, _["errors"] = List::create( _["dtmcConfidenceInterval"] = List::create( _["confidenceLevel"] = dtmcData["confidenceLevel"], _["lowerEndpointMatrix"] = dtmcData["lowerEndpointMatrix"], _["upperEndpointMatrix"] = dtmcData["upperEndpointMatrix"]), _["lambdaConfidenceInterval"] = List::create( _["lowerEndpointVector"] = lowerConfVecLambda, _["upperEndpointVector"] = upperConfVecLambda)) ); } markovchain/src/Makevars0000644000176200001440000000022215137702633015042 0ustar liggesusersPKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) #CXX_STD = CXX11 PKG_LIBS += $(shell ${R_HOME}/bin/Rscript -e "RcppParallel::RcppParallelLibs()") markovchain/src/fitHigherOrder.cpp0000644000176200001440000000261315137702633016765 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include using namespace Rcpp; // sequence to frequency probability vector //' @export // [[Rcpp::export]] NumericVector seq2freqProb(CharacterVector sequence) { int n = sequence.size(); CharacterVector states = unique(sequence).sort(); int nstates = states.length(); NumericVector v(nstates); v.names() = states; for (int i = 0; i < n; i ++) v[std::string(sequence[i])] = v[std::string(sequence[i])] + 1.0; NumericVector out = v/sum(v); out.names() = v.names(); return out; } // sequence to transition matrix for higher order markov chai //' @export // [[Rcpp::export]] NumericMatrix seq2matHigh(CharacterVector sequence, int order) { int n = sequence.size(); CharacterVector states = unique(sequence).sort(); int nstates = states.length(); NumericVector colsums(nstates); NumericMatrix out(nstates); out.attr("dimnames") = List::create(states, states); for (int i = 0; i < n - order; i++) { int from = -1, to = -1; for (int j = 0; j < nstates; j++) { if (sequence[i] == states[j]) from = j; if (sequence[i + order] == states[j]) to = j; } if (from != -1 && to != -1) { out(to, from) ++; colsums[from] ++; } } for (int i = 0; i < nstates; i ++) { for (int j 
= 0; j < nstates; j ++) out(i, j) /= colsums[j]; } return out; } markovchain/src/multinomCI.cpp0000644000176200001440000001135215137702633016140 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include using namespace Rcpp; // returns the column sums of a matrix NumericVector colSums(NumericMatrix m) { NumericVector out; for (int i = 0; i < m.cols(); i ++) out.push_back(sum(m.column(i))); return out; } // poisson distribution double ppois(double n, double lambda) { return R::ppois(n,lambda, true, false); } // moments NumericVector moments(int c, double lambda) { double a = lambda+c; double b = lambda-c; double den = 0, poisA = 0, poisB = 0; if (b < 0) b = 0; if (b > 0) den = ppois(a, lambda) - ppois(b - 1, lambda); if (b == 0) den = ppois(a, lambda); NumericVector mu(4); NumericVector mom(5); for (int r = 1; r <= 4; r ++) { poisA = 0; poisB = 0; if ((a - r) >= 0) poisA = ppois(a, lambda) - ppois(a - r, lambda); if ((a - r) < 0) poisA = ppois(a, lambda); if ((b - r - 1) >= 0) poisB = ppois(b - 1, lambda) - ppois(b - r - 1, lambda); if ((b - r - 1) < 0 && (b - 1) >= 0) poisB = ppois(b - 1, lambda); if ((b - r - 1) < 0 && (b - 1) < 0) poisB = 0; mu[r - 1] = (pow(lambda, r)) * (1 - (poisA - poisB)/den); } mom[0] = mu[0]; mom[1] = mu[1] + mu[0] - pow(mu[0], 2); mom[2] = mu[2] + mu[1] * (3 - 3*mu[0]) + (mu[0] - 3*pow(mu[0], 2) + 2*pow(mu[0], 3)); mom[3] = mu[3] + mu[2] * (6 - 4*mu[0]) + mu[1] * (7 - 12*mu[0] + 6*pow(mu[0], 2)) + mu[0] - 4 * pow(mu[0], 2) + 6 * pow(mu[0], 3) - 3 * pow(mu[0], 4); mom[4] = den; return mom; } // coverage probability for the particular choice of c double truncpoi(int c, NumericVector x, double n, int k) { NumericMatrix m(k,5); for (int i = 0; i < k; i++) { double lambda = x[i]; NumericVector mom = moments(c, lambda); for (int j = 0; j < 5; j ++) m(i, j) = mom[j]; } for (int i = 0; i < k; i ++) m(i, 3) = m(i, 3) - 3 * m(i, 1) * m(i, 1); NumericVector s = colSums(m); double s1 = s[0]; double s2 = s[1]; double s3 = s[2]; double s4 = s[3]; 
double probn = 1 / (ppois(n, n) - ppois(n - 1, n)); double z = (n - s1) / sqrt(s2); double g1 = s3 / (pow(s2, (3.0 / 2.0))); double g2 = s4 / (pow(s2, 2)); double poly = 1.0 + g1 * (pow(z, 3) - 3 * z) / 6.0 + g2 * (pow(z, 4) - 6.0 * pow(z, 2) + 3.0) / 24.0 + pow(g1, 2) * (pow(z, 6) - 15.0 * pow(z, 4) + 45.0*pow(z, 2) - 15.0)/72.0; double f = poly * exp(-pow(z, 2)/2) / (sqrt(2.0) * R::gammafn(0.5)); double probx=1; for (int i = 0; i < k; i++) probx = probx * m(i, 4); return(probn * probx * f / sqrt(s2)); } // multinomial confidence intervals for a row // [[Rcpp::export(.multinomialCIForRowRcpp)]] NumericMatrix multinomialCIForRow(NumericVector x, double confidencelevel) { double n = std::accumulate(x.begin(), x.end(), 0.0); int k = x.size(); double c = 0; double p = 0, pold = 0; for (int cc = 1; cc <= n; cc ++) { p = truncpoi(cc, x, n, k); if (p > confidencelevel && pold < confidencelevel) { c = cc; break; } pold = p; } NumericMatrix salida(k, 2); double delta = (confidencelevel - pold) / (p - pold); NumericMatrix out(k,5); NumericMatrix num(k,1); c--; double obsp = 0; for (int i = 0; i < k; i++) { num(i, 0) = i; obsp = x[i] / n; out(i, 0) = obsp; out(i, 1) = obsp - c/n; out(i, 2) = obsp + c/n + 2*delta/n; if (out(i, 1) < 0) out(i, 1) = 0; if (out(i, 2) > 1) out(i, 2) = 1; out(i, 3) = obsp - c/n - 1/n; out(i, 4) = obsp + c/n + 1/n; salida(i, 0) = out(i, 1); salida(i, 1) = out(i, 2); } return salida; } // multinomial confidence intervals // [[Rcpp::export(.multinomialCIRcpp)]] List multinomCI(NumericMatrix transMat, NumericMatrix seqMat, double confidencelevel) { NumericMatrix res; NumericVector v; int nrows = transMat.nrow(); int ncols = transMat.ncol(); NumericMatrix lowerEndpointMatr(nrows, ncols); NumericMatrix upperEndpointMatr(nrows, ncols); double lowerEndpoint, upperEndpoint; for (int i = 0; i < nrows; i ++) { v = seqMat.row(i); res = multinomialCIForRow(v, confidencelevel); for (int j = 0; j < res.rows(); j++) { lowerEndpoint = res(j, 0); 
lowerEndpointMatr(i,j) = lowerEndpoint; upperEndpoint = res(j, 1); upperEndpointMatr(i,j) = upperEndpoint; } } upperEndpointMatr.attr("dimnames") = lowerEndpointMatr.attr("dimnames") = seqMat.attr("dimnames"); List out = List::create(_["confidenceLevel"] = confidencelevel, _["lowerEndpointMatrix"] = lowerEndpointMatr, _["upperEndpointMatrix"] = upperEndpointMatr); return out; } markovchain/src/probabilistic.cpp0000644000176200001440000013202015137702633016702 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include #include #include using namespace Rcpp; using namespace std; using namespace arma; template T sortByDimNames(const T m); typedef unsigned int uint; // Returns whether a Markov chain is ergodic // Declared in this same file bool isIrreducible(S4 obj); // Declared in utils.cpp bool anyElement(const mat& matrix, bool (*condition)(const double&)); // Declared in utils.cpp bool allElements(const mat& matrix, bool (*condition)(const double&)); // Declared in utils.cpp bool approxEqual(const cx_double& a, const cx_double& b); // Used in commClassesKernel void strongConnect(int v, vector& disc, vector& low, vector& onStack, int& index, stack& exploring, NumericMatrix& P, vector>& components, int numStates) { disc[v] = index; low[v] = index; ++index; exploring.push(v); onStack[v] = true; // For each edge (v, w) that goes out of v for (int w = 0; w < numStates; ++w) { if (P(v, w) > 0) { // If w has not been visited yet, compute [w], and update // the minimum node we can travel to from v if (disc[w] == -1) { strongConnect(w, disc, low, onStack, index, exploring, P, components, numStates); low[v] = min(low[v], low[w]); // Otherwise, if w is on the stack of nodes been explored, // update the minimum node we can travel to from v } else if (onStack[w]) { low[v] = min(low[v], disc[w]); } // Otherwise, (v, w) is a cross edge between components // in the DFS tree, do nothing } } // If v is the root of [v], unwind the strongly connected // 
component from the stack if (low[v] == disc[v]) { bool remaining = true; unordered_set component; int w; while (remaining) { w = exploring.top(); exploring.pop(); component.insert(w); onStack[w] = false; remaining = w != v; } components.push_back(component); } } // This method is based on Tarjan's algorithm to find strongly // connected components in a directed graph: // https://en.wikipedia.org/wiki/Tarjan's_strongly_connected_components_algorithm // to compute the communicating classes. // Output: // - classes: a matrix whose entry (i, j) is true iff i and // j are in the same communicating class // - closed: a vector whose i-th entry indicates whether the // class [i] is closed // // [[Rcpp::export(.commClassesKernelRcpp)]] List commClassesKernel(NumericMatrix P) { // The matrix must be stochastic by rows int numStates = P.ncol(); vector disc(numStates, -1); vector low(numStates, -1); vector onStack(numStates, false); vector> components; stack exploring; int index = 0; // If the component [v] has not been computed yet // (disc[v] == -1), compute it for (int v = 0; v < numStates; ++v) { if (disc[v] == -1) strongConnect(v, disc, low, onStack, index, exploring, P, components, numStates); } // Create the output data structures CharacterVector stateNames = rownames(P); LogicalMatrix classes(numStates, numStates); classes.attr("dimnames") = P.attr("dimnames"); std::fill(classes.begin(), classes.end(), false); LogicalVector closed(numStates); closed.names() = stateNames; for (auto component : components) { bool isClosed = true; // The class is closed iff there is no edge going out of the class for (int i : component) { for (int j = 0; j < numStates; ++j) if (P(i, j) > 0 && component.count(j) == 0) isClosed = false; } // Set the communicating matrix and whether it is closed or not for (int i : component) { closed(i) = isClosed; for (int j : component) classes(i, j) = true; } } return List::create(_["classes"] = classes, _["closed"] = closed); } // Wrapper that computes 
the communicating states from the matrix generated by // commClassesKernel (a matrix where an entry i,j is TRUE iff i and j are in the // same communicating class). It also needs the list of states names from the // Markov Chain List computeCommunicatingClasses(LogicalMatrix& commClasses, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List classesList; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; if (!computed[i]) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } classesList.push_back(currentClass); } } return classesList; } // [[Rcpp::export(.communicatingClassesRcpp)]] List communicatingClasses(S4 object) { // Returns the underlying communicating classes // It is indifferent if the matrices are stochastic by rows or columns NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; return computeCommunicatingClasses(commClasses, states); } // Wrapper that computes the transient states from a list of the states and a // vector indicating whether the communicating class for each state is closed CharacterVector computeTransientStates(CharacterVector& states, LogicalVector& closedClass) { CharacterVector transientStates; for (int i = 0; i < states.size(); i++) if (!closedClass[i]) transientStates.push_back(states[i]); return transientStates; } // Wrapper that computes the recurrent states from a list of states and a // vector indicating whether the communicating class for each state is closed CharacterVector computeRecurrentStates(CharacterVector& states, LogicalVector& closedClass) { CharacterVector recurrentStates; for (int i = 0; i < states.size(); i++) if 
(closedClass[i]) recurrentStates.push_back(states[i]); return recurrentStates; } // [[Rcpp::export(.transientStatesRcpp)]] CharacterVector transientStates(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commKernel = commClassesKernel(transitionMatrix); LogicalVector closed = commKernel["closed"]; CharacterVector states = object.slot("states"); return computeTransientStates(states, closed); } // [[Rcpp::export(.recurrentStatesRcpp)]] CharacterVector recurrentStates(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commKernel = commClassesKernel(transitionMatrix); LogicalVector closed = commKernel["closed"]; return computeRecurrentStates(states, closed); } // Wrapper that computes the recurrent classes from the matrix given by // commClassesKernel (which entry i,j is TRUE iff i and j are in the same // communicating class), a vector indicating wheter the class for state is // closed and the states of the Markov Chain List computeRecurrentClasses(LogicalMatrix& commClasses, LogicalVector& closedClass, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List recurrentClassesList; bool isRecurrentClass; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; isRecurrentClass = closedClass(i) && !computed[i]; if (isRecurrentClass) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } recurrentClassesList.push_back(currentClass); } } return recurrentClassesList; } // returns the recurrent classes // [[Rcpp::export(.recurrentClassesRcpp)]] List recurrentClasses(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow 
= object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; return computeRecurrentClasses(commClasses, closed, states); } // Wrapper that computes the transient classes from the matrix given by // commClassesKernel (which entry i,j is TRUE iff i and j are in the same // communicating class), a vector indicating wheter the class for state is // closed and the states of the Markov Chain List computeTransientClasses(LogicalMatrix& commClasses, LogicalVector& closedClass, CharacterVector& states) { int numStates = states.size(); vector computed(numStates, false); List transientClassesList; bool isTransientClass; for (int i = 0; i < numStates; ++i) { CharacterVector currentClass; isTransientClass = !closedClass(i) && !computed[i]; if (isTransientClass) { for (int j = 0; j < numStates; ++j) { if (commClasses(i, j)) { currentClass.push_back(states[j]); computed[j] = true; } } transientClassesList.push_back(currentClass); } } return transientClassesList; } // returns the transient classes // [[Rcpp::export(.transientClassesRcpp)]] List transientClasses(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; return computeTransientClasses(commClasses, closed, states); } // Defined in probabilistic.cpp mat matrixPow(const mat& A, int n); // [[Rcpp::export(.reachabilityMatrixRcpp)]] LogicalMatrix reachabilityMatrix(S4 obj) { NumericMatrix matrix = obj.slot("transitionMatrix"); // Reachability matrix int m = 
matrix.nrow(); mat X(matrix.begin(), m, m, true); mat reachability = eye(m, m) + sign(X); reachability = matrixPow(reachability, m - 1); LogicalMatrix result = wrap(reachability > 0); result.attr("dimnames") = matrix.attr("dimnames"); return result; } // [[Rcpp::export(.isAccessibleRcpp)]] bool isAccessible(S4 obj, String from, String to) { NumericMatrix probs = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); int fromPos = -1, toPos = -1; bool byrow = obj.slot("byrow"); int m = probs.ncol(); // Compute indices for states from and pos for (int i = 0; i < m; ++i) { if (states[i] == from) fromPos = i; if (states[i] == to) toPos = i; } if (fromPos == -1 || toPos == -1) stop("Please give valid states method"); stack toExplore; toExplore.push(fromPos); vector visited(m, false); visited[fromPos] = true; bool isReachable = false; // DFS until we hit 'to' state or we cannot traverse to more states while (!toExplore.empty() && !isReachable) { int i = toExplore.top(); toExplore.pop(); visited[i] = true; isReachable = i == toPos; for (int j = 0; j < m; ++j) if (((byrow && !approxEqual(probs(i, j), 0)) || (!byrow && !approxEqual(probs(j, i), 0))) && !visited[j]) toExplore.push(j); } return isReachable; } // summary of markovchain object // [[Rcpp::export(.summaryKernelRcpp)]] List summaryKernel(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); bool byrow = object.slot("byrow"); CharacterVector states = object.slot("states"); if (!byrow) transitionMatrix = transpose(transitionMatrix); List commClassesList = commClassesKernel(transitionMatrix); LogicalMatrix commClasses = commClassesList["classes"]; LogicalVector closed = commClassesList["closed"]; List recurrentClasses = computeRecurrentClasses(commClasses, closed, states); List transientClasses = computeTransientClasses(commClasses, closed, states); List summaryResult = List::create(_["closedClasses"] = recurrentClasses, _["recurrentClasses"] = recurrentClasses, 
_["transientClasses"] = transientClasses); return(summaryResult); } //here the kernel function to compute the first passage // [[Rcpp::export(.firstpassageKernelRcpp)]] NumericMatrix firstpassageKernel(NumericMatrix P, int i, int n) { arma::mat G = as(P); arma::mat Pa = G; arma::mat H(n, P.ncol()); //here Thoralf suggestion //initializing the first row for (unsigned int j = 0; j < G.n_cols; j++) H(0, j) = G(i-1, j); arma::mat E = 1 - arma::eye(P.ncol(), P.ncol()); for (int m = 1; m < n; m++) { G = Pa * (G%E); for (unsigned int j = 0; j < G.n_cols; j ++) H(m, j) = G(i-1, j); } NumericMatrix R = wrap(H); return R; } // [[Rcpp::export(.firstPassageMultipleRCpp)]] NumericVector firstPassageMultipleRCpp(NumericMatrix P,int i, NumericVector setno, int n) { arma::mat G = as(P); arma::mat Pa = G; arma::vec H = arma::zeros(n); //here Thoralf suggestion unsigned int size = setno.size(); //initializing the first row for (unsigned int k = 0; k < size; k++) { H[0] += G(i-1, setno[k]-1); } arma::mat E = 1 - arma::eye(P.ncol(), P.ncol()); for (int m = 1; m < n; m++) { G = Pa * (G%E); for (unsigned int k = 0; k < size; k++) { H[m] += G(i-1, setno[k]-1); } } NumericVector R = wrap(H); return R; } // [[Rcpp::export(.expectedRewardsRCpp)]] NumericVector expectedRewardsRCpp(NumericMatrix matrix, int n, NumericVector rewards) { // initialises output vector NumericVector out; // gets no of states int no_of_states = matrix.ncol(); // initialises armadillo matrices and vectors arma::vec temp = arma::zeros(no_of_states); arma::mat matr = as(matrix); arma::vec v = arma::zeros(no_of_states); // initialses the vector for the base case of dynamic programming expression for (int i=0;i(matrix); arma::mat temp = as(matrix); arma::vec r = as(rewards); arma::mat I = arma::zeros(1,size); I(0,s0-1) = 1; for (int j = 0; j < n; j++) { arma::mat res = I*(temp*r); result = result + res(0,0); temp = temp*matr; } return result; } // greatest common denominator // [[Rcpp::export(.gcdRcpp)]] int gcd (int a, 
int b) { int c; a = abs(a); b = abs(b); while ( a != 0 ) { c = a; a = b%a; b = c; } return b; } // function to get the period of a DTMC //' @rdname structuralAnalysis //' //' @export // [[Rcpp::export(period)]] int period(S4 object) { bool irreducible = isIrreducible(object); if (!irreducible) { warning("The matrix is not irreducible"); return 0; } else { NumericMatrix P = object.slot("transitionMatrix"); int n = P.ncol(); std::vector r, T(1), w; int d = 0, m = T.size(), i = 0, j = 0; if (n > 0) { arma::vec v(n); v[0] = 1; while (m>0 && d!=1) { i = T[0]; T.erase(T.begin()); w.push_back(i); j = 0; while (j < n) { if (P(i,j) > 0) { r.insert(r.end(), w.begin(), w.end()); r.insert(r.end(), T.begin(), T.end()); double k = 0; for (std::vector::iterator it = r.begin(); it != r.end(); it ++) if (*it == j) k ++; if (k > 0) { int b = v[i] + 1 - v[j]; d = gcd(d, b); } else { T.push_back(j); v[j] = v[i] + 1; } } j++; } m = T.size(); } } // v = v - floor(v/d)*d; return d; } } //' @title predictiveDistribution //' //' @description The function computes the probability of observing a new data //' set, given a data set //' @usage predictiveDistribution(stringchar, newData, hyperparam = matrix()) //' //' @param stringchar This is the data using which the Bayesian inference is //' performed. //' @param newData This is the data whose predictive probability is computed. //' @param hyperparam This determines the shape of the prior distribution of the //' parameters. If none is provided, default value of 1 is assigned to each //' parameter. This must be of size kxk where k is the number of states in the //' chain and the values should typically be non-negative integers. //' @return The log of the probability is returned. //' //' @details The underlying method is Bayesian inference. The probability is //' computed by averaging the likelihood of the new data with respect to the //' posterior. 
Since the method assumes conjugate priors, the result can be //' represented in a closed form (see the vignette for more details), which is //' what is returned. //' @references //' Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, //' and Out-of-Class Modeling, Christopher C. Strelioff, James P. //' Crutchfield, Alfred Hubler, Santa Fe Institute //' //' Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov //' Chains. R package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi //' @seealso \code{\link{markovchainFit}} //' @examples //' sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", //' "b", "b", "b", "a") //' hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) //' predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) //' hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] //' predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) //' predProb2==predProb //' @export //' // [[Rcpp::export]] double predictiveDistribution(CharacterVector stringchar, CharacterVector newData, NumericMatrix hyperparam = NumericMatrix()) { // construct list of states CharacterVector elements = stringchar; for (int i = 0; i < newData.size(); i++) elements.push_back(newData[i]); elements = unique(elements).sort(); int sizeMatr = elements.size(); // if no hyperparam argument provided, use default value of 1 for all if (hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(elements, elements); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } // validity check if (hyperparam.nrow() != hyperparam.ncol()) stop("Dimensions of the hyperparameter matrix are inconsistent"); if (hyperparam.nrow() < sizeMatr) stop("Hyperparameters for all state transitions must be provided"); List dimNames 
= hyperparam.attr("dimnames"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; int sizeHyperparam = hyperparam.ncol(); CharacterVector sortedColNames(sizeHyperparam), sortedRowNames(sizeHyperparam); for (int i = 0; i < sizeHyperparam; i++) sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); sortedColNames.sort(); sortedRowNames.sort(); for (int i = 0; i < sizeHyperparam; i++) { if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); bool found = false; for (int j = 0; j < sizeMatr; j++) if (elements(j) == sortedColNames(i)) found = true; // hyperparam may contain states not in stringchar if (!found) elements.push_back(sortedColNames(i)); } // check for the case where hyperparam has missing data for (int i = 0; i < sizeMatr; i++) { bool found = false; for (int j = 0; j < sizeHyperparam; j++) if (sortedColNames(j) == elements(i)) found = true; if (!found) stop("Hyperparameters for all state transitions must be provided"); } elements = elements.sort(); sizeMatr = elements.size(); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) if (hyperparam(i, j) < 1.) 
stop("The hyperparameter elements must all be greater than or equal to 1"); // permute the elements of hyperparam such that the row, column names are sorted hyperparam = sortByDimNames(hyperparam); NumericMatrix freqMatr(sizeMatr), newFreqMatr(sizeMatr); double predictiveDist = 0.; // log of the predictive probability // populate frequeny matrix for old data; this is used for inference int posFrom = 0, posTo = 0; for (int i = 0; i < stringchar.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (stringchar[i] == elements[j]) posFrom = j; if (stringchar[i + 1] == elements[j]) posTo = j; } freqMatr(posFrom,posTo)++; } // frequency matrix for new data for (int i = 0; i < newData.size() - 1; i ++) { for (int j = 0; j < sizeMatr; j ++) { if (newData[i] == elements[j]) posFrom = j; if (newData[i + 1] == elements[j]) posTo = j; } newFreqMatr(posFrom,posTo)++; } for (int i = 0; i < sizeMatr; i++) { double rowSum = 0, newRowSum = 0, paramRowSum = 0; for (int j = 0; j < sizeMatr; j++) { rowSum += freqMatr(i, j), newRowSum += newFreqMatr(i, j), paramRowSum += hyperparam(i, j); predictiveDist += lgamma(freqMatr(i, j) + newFreqMatr(i, j) + hyperparam(i, j)) - lgamma(freqMatr(i, j) + hyperparam(i, j)); } predictiveDist += lgamma(rowSum + paramRowSum) - lgamma(rowSum + newRowSum + paramRowSum); } return predictiveDist; } //' @title priorDistribution //' //' @description Function to evaluate the prior probability of a transition //' matrix. It is based on conjugate priors and therefore a Dirichlet //' distribution is used to model the transitions of each state. //' @usage priorDistribution(transMatr, hyperparam = matrix()) //' //' @param transMatr The transition matrix whose probability is the parameter of //' interest. //' @param hyperparam The hyperparam matrix (optional). If not provided, a //' default value of 1 is assumed for each and therefore the resulting //' probability distribution is uniform. 
//' @return The log of the probabilities for each state is returned in a numeric //' vector. Each number in the vector represents the probability (log) of //' having a probability transition vector as specified in corresponding the //' row of the transition matrix. //' //' @details The states (dimnames) of the transition matrix and the hyperparam //' may be in any order. //' @references Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First //' Order Markov Chains. R package version 0.2.5 //' //' @author Sai Bhargav Yalamanchi, Giorgio Spedicato //' //' @note This function can be used in conjunction with inferHyperparam. For //' example, if the user has a prior data set and a prior transition matrix, //' he can infer the hyperparameters using inferHyperparam and then compute //' the probability of their prior matrix using the inferred hyperparameters //' with priorDistribution. //' @seealso \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} //' //' @examples //' priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), //' nrow = 2, //' dimnames = list(c("a", "b"), c("a", "b"))), //' matrix(c(2, 2, 2, 2), //' nrow = 2, //' dimnames = list(c("a", "b"), c("a", "b")))) //' @export // [[Rcpp::export]] NumericVector priorDistribution(NumericMatrix transMatr, NumericMatrix hyperparam = NumericMatrix()) { // begin validity checks for the transition matrix if (transMatr.nrow() != transMatr.ncol()) stop("Transition matrix dimensions are inconsistent"); int sizeMatr = transMatr.nrow(); for (int i = 0; i < sizeMatr; i++) { double rowSum = 0., eps = 1e-10; for (int j = 0; j < sizeMatr; j++) if (transMatr(i, j) < 0. || transMatr(i, j) > 1.) stop("The entries in the transition matrix must each belong to the interval [0, 1]"); else rowSum += transMatr(i, j); if (rowSum <= 1. - eps || rowSum >= 1. 
+ eps) stop("The rows of the transition matrix must each sum to 1"); } List dimNames = transMatr.attr("dimnames"); if (dimNames.size() == 0) stop("Provide dimnames for the transition matrix"); CharacterVector colNames = dimNames[1]; CharacterVector rowNames = dimNames[0]; CharacterVector sortedColNames(sizeMatr), sortedRowNames(sizeMatr); for (int i = 0; i < sizeMatr; i++) sortedColNames(i) = colNames(i), sortedRowNames(i) = rowNames(i); sortedColNames.sort(); sortedRowNames.sort(); for (int i = 0; i < sizeMatr; i++) if (i > 0 && (sortedColNames(i) == sortedColNames(i-1) || sortedRowNames(i) == sortedRowNames(i-1))) stop("The states must all be unique"); else if (sortedColNames(i) != sortedRowNames(i)) stop("The set of row names must be the same as the set of column names"); // if no hyperparam argument provided, use default value of 1 for all if (hyperparam.nrow() == 1 && hyperparam.ncol() == 1) { NumericMatrix temp(sizeMatr, sizeMatr); temp.attr("dimnames") = List::create(sortedColNames, sortedColNames); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) temp(i, j) = 1; hyperparam = temp; } // validity check for hyperparam if (hyperparam.nrow() != hyperparam.ncol()) stop("Dimensions of the hyperparameter matrix are inconsistent"); if (hyperparam.nrow() != sizeMatr) stop("Hyperparameter and the transition matrices differ in dimensions"); List _dimNames = hyperparam.attr("dimnames"); if (_dimNames.size() == 0) stop("Provide dimnames for the hyperparameter matrix"); CharacterVector _colNames = _dimNames[1]; CharacterVector _rowNames = _dimNames[0]; int sizeHyperparam = hyperparam.ncol(); CharacterVector _sortedColNames(sizeHyperparam), _sortedRowNames(sizeHyperparam); for (int i = 0; i < sizeHyperparam; i++) _sortedColNames(i) = colNames(i), _sortedRowNames(i) = rowNames(i); _sortedColNames.sort(); _sortedRowNames.sort(); for (int i = 0; i < sizeHyperparam; i++) if (sortedColNames(i) != _sortedColNames(i) || sortedRowNames(i) != 
_sortedRowNames(i)) stop("Hyperparameter and the transition matrices states differ"); for (int i = 0; i < sizeMatr; i++) for (int j = 0; j < sizeMatr; j++) if (hyperparam(i, j) < 1.) stop("The hyperparameter elements must all be greater than or equal to 1"); transMatr = sortByDimNames(transMatr); hyperparam = sortByDimNames(hyperparam); NumericVector logProbVec; for (int i = 0; i < sizeMatr; i++) { double logProb_i = 0., hyperparamRowSum = 0; for (int j = 0; j < sizeMatr; j++) { hyperparamRowSum += hyperparam(i, j); logProb_i += (hyperparam(i, j) - 1.) * log(transMatr(i, j)) - lgamma(hyperparam(i, j)); } logProb_i += lgamma(hyperparamRowSum); logProbVec.push_back(logProb_i); } logProbVec.attr("names") = sortedColNames; return logProbVec; } // [[Rcpp::export(.hittingProbabilitiesRcpp)]] NumericMatrix hittingProbabilities(S4 object) { NumericMatrix transitionMatrix = object.slot("transitionMatrix"); CharacterVector states = object.slot("states"); bool byrow = object.slot("byrow"); if (!byrow) transitionMatrix = transpose(transitionMatrix); int numStates = transitionMatrix.nrow(); arma::mat transitionProbs = as(transitionMatrix); arma::mat hittingProbs(numStates, numStates); // Compute closed communicating classes List commClasses = commClassesKernel(transitionMatrix); List closedClass = commClasses["closed"]; LogicalMatrix communicating = commClasses["classes"]; for (int j = 0; j < numStates; ++j) { arma::mat coeffs = as(transitionMatrix); arma::vec right_part = -transitionProbs.col(j); for (int i = 0; i < numStates; ++i) { coeffs(i, j) = 0; coeffs(i, i) -= 1; } for (int i = 0; i < numStates; ++i) { if (closedClass(i)) { for (int k = 0; k < numStates; ++k) if (k != i) coeffs(i, k) = 0; else coeffs(i, i) = 1; if (communicating(i, j)) right_part(i) = 1; else right_part(i) = 0; } } hittingProbs.col(j) = arma::solve(coeffs, right_part); } NumericMatrix result = wrap(hittingProbs); colnames(result) = states; rownames(result) = states; if (!byrow) result = 
transpose(result); return result; } // method to convert into canonic form a markovchain object // [[Rcpp::export(.canonicFormRcpp)]] S4 canonicForm(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); bool byrow = obj.slot("byrow"); int numRows = transitions.nrow(); int numCols = transitions.ncol(); NumericMatrix resultTransitions(numRows, numCols); CharacterVector states = obj.slot("states"); unordered_map stateToIndex; unordered_set usedIndices; int currentIndex; List recClasses; S4 input("markovchain"); S4 result("markovchain"); vector indexPermutation(numRows); if (!byrow) { input.slot("transitionMatrix") = transpose(transitions); input.slot("states") = states; input.slot("byrow") = true; transitions = transpose(transitions); } else { input = obj; } recClasses = recurrentClasses(input); // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { string state = (string) states[i]; stateToIndex[state] = i; } int toFill = 0; for (CharacterVector recClass : recClasses) { for (auto state : recClass) { currentIndex = stateToIndex[(string) state]; indexPermutation[toFill] = currentIndex; ++toFill; usedIndices.insert(currentIndex); } } for (int i = 0; i < states.size(); ++i) { if (usedIndices.count(i) == 0) { indexPermutation[toFill] = i; ++toFill; } } CharacterVector newStates(numRows); for (int i = 0; i < numRows; ++i) { int r = indexPermutation[i]; newStates(i) = states(r); for (int j = 0; j < numCols; ++j) { int c = indexPermutation[j]; resultTransitions(i, j) = transitions(r, c); } } rownames(resultTransitions) = newStates; colnames(resultTransitions) = newStates; if (!byrow) resultTransitions = transpose(resultTransitions); result.slot("transitionMatrix") = resultTransitions; result.slot("byrow") = byrow; result.slot("states") = newStates; result.slot("name") = input.slot("name"); return result; } // Function to sort a matrix of vectors lexicographically NumericMatrix lexicographicalSort(NumericMatrix m) { int numCols = 
m.ncol(); int numRows = m.nrow(); if (numRows > 0 && numCols > 0) { vector> x(numRows, vector(numCols)); for (int i = 0; i < numRows; ++i) for (int j = 0; j < numCols; ++j) x[i][j] = m(i,j); sort(x.begin(), x.end()); NumericMatrix result(numRows, numCols); for (int i = 0; i < numRows; ++i) for (int j = 0; j < numCols; ++j) result(i, j) = x[i][j]; colnames(result) = colnames(m); return result; } else { return m; } } // This method computes the *unique* steady state that exists for an // matrix has to be schocastic by rows // ergodic (= irreducible) matrix vec steadyStateErgodicMatrix(const mat& submatrix) { int nRows = submatrix.n_rows; int nCols = submatrix.n_cols; vec rightPart(nRows + 1, fill::zeros); vec result; mat coeffs(nRows + 1, nCols); // If P is Ergodic, the system (I - P)*w = 0 plus the equation // w_1 + ... + w_m = 1 must have a soultion for (int i = 0; i < nRows; ++i) { for (int j = 0; j < nCols; ++j) { // transpose matrix in-place coeffs(i, j) = submatrix(j, i); if (i == j) coeffs(i, i) -= 1; } } for (int j = 0; j < nCols; ++j) coeffs(nRows, j) = 1; rightPart(nRows) = 1; if (!solve(result, coeffs, rightPart)) stop("Failure computing eigen values / vectors for submatrix in steadyStateErgodicMatrix"); return result; } // Precondition: the matrix should be stochastic by rows NumericMatrix steadyStatesByRecurrentClasses(S4 object) { List recClasses = recurrentClasses(object); int numRecClasses = recClasses.size(); NumericMatrix transitionMatrix = object.slot("transitionMatrix"); CharacterVector states = object.slot("states"); int numCols = transitionMatrix.ncol(); NumericMatrix steady(numRecClasses, numCols); unordered_map stateToIndex; int steadyStateIndex = 0; // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { string state = (string) states[i]; stateToIndex[state] = i; } // For each recurrent class, there must be an steady state for (CharacterVector recurrentClass : recClasses) { int recClassSize = recurrentClass.size(); mat 
subMatrix(recClassSize, recClassSize); // Fill the submatrix corresponding to the current steady class // Note that for that we have to subset the matrix with the indices // the states in the recurrent class ocuppied in the transition matrix for (int i = 0; i < recClassSize; ++i) { int r = stateToIndex[(string) recurrentClass[i]]; for (int j = 0; j < recClassSize; ++j) { int c = stateToIndex[(string) recurrentClass[j]]; subMatrix(i, j) = transitionMatrix(r, c); } } // Compute the steady states for the given submatrix vec steadyState = steadyStateErgodicMatrix(subMatrix); for (int i = 0; i < recClassSize; ++i) { int c = stateToIndex[(string) recurrentClass[i]]; steady(steadyStateIndex, c) = steadyState(i);; } ++steadyStateIndex; } colnames(steady) = states; return steady; } // [[Rcpp::export(.steadyStatesRcpp)]] NumericMatrix steadyStates(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); S4 object("markovchain"); if (!byrow) { object.slot("transitionMatrix") = transpose(transitions); object.slot("states") = states; object.slot("byrow") = true; } else { object = obj; } // Compute steady states using recurrent classes (there is // exactly one steady state associated with each recurrent class) NumericMatrix result = lexicographicalSort(steadyStatesByRecurrentClasses(object)); if (!byrow) result = transpose(result); return result; } // This method is agnostic on whether the matrix is stochastic // by rows or by columns, we just need the diagonal // [[Rcpp::export(.absorbingStatesRcpp)]] CharacterVector absorbingStates(S4 obj) { NumericMatrix transitionMatrix = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); CharacterVector absorbing; int numStates = states.size(); for (int i = 0; i < numStates; ++i) if (approxEqual(transitionMatrix(i, i), 1)) absorbing.push_back(states(i)); return absorbing; } // [[Rcpp::export(.isIrreducibleRcpp)]] bool 
isIrreducible(S4 obj) { List commClasses = communicatingClasses(obj); // The markov chain is irreducible iff has only a single communicating class return commClasses.size() == 1; } // [[Rcpp::export(.isRegularRcpp)]] bool isRegular(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); int m = transitions.ncol(); mat probs(transitions.begin(), m, m, true); mat reachable; auto arePositive = [](const double& x){ return x > 0; }; // Taken from the book: // Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. // Theorem 8.5.9 // A is regular iff A^{m²- 2m + 2} > 0 reachable = matrixPow(probs, m*m - 2*m + 2); return allElements(reachable, arePositive); } NumericMatrix computeMeanAbsorptionTimes(mat& probs, CharacterVector& absorbing, CharacterVector& states) { unordered_set toErase; vector indicesToKeep; CharacterVector newNames; string current; for (auto state : absorbing) toErase.insert((string) state); // Compute the states which are not in absorbing // and subset the sub-probability matrix of those // states which are not considered absorbing, Q for (uint i = 0; i < states.size(); ++i) { current = (string) states(i); if (toErase.count(current) == 0) { indicesToKeep.push_back(i); newNames.push_back(current); } } int n = indicesToKeep.size(); uvec indices(indicesToKeep); // Comppute N = 1 - Q mat coeffs = eye(n, n) - probs(indices, indices); vec rightPart = vec(n, fill::ones); mat meanTimes; // Mean absorbing times A are computed as N * A = 1, // where 1 is a column vector of 1s if (!solve(meanTimes, coeffs, rightPart)) stop("Error solving system in meanAbsorptionTime"); NumericMatrix result = wrap(meanTimes); rownames(result) = newNames; return result; } // [[Rcpp::export(.meanAbsorptionTimeRcpp)]] NumericVector meanAbsorptionTime(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); unordered_set allStates; if (!byrow) transitions = 
transpose(transitions); // Compute recurrent and transient states List commKernel = commClassesKernel(transitions); LogicalVector closed = commKernel["closed"]; CharacterVector transient = computeTransientStates(states, closed); CharacterVector recurrent = computeRecurrentStates(states, closed); // Compute the mean absorption time for the transient states mat probs(transitions.begin(), transitions.nrow(), transitions.ncol(), true); NumericMatrix meanTimes = computeMeanAbsorptionTimes(probs, recurrent, states); NumericVector result; if (meanTimes.ncol() > 0) { result = meanTimes(_, 0); result.attr("names") = transient; } return result; } // [[Rcpp::export(.absorptionProbabilitiesRcpp)]] NumericMatrix absorptionProbabilities(S4 obj) { NumericMatrix transitions = obj.slot("transitionMatrix"); CharacterVector states = obj.slot("states"); string current; bool byrow = obj.slot("byrow"); if (!byrow) transitions = transpose(transitions); unordered_map stateToIndex; // Map each state to the index it has for (int i = 0; i < states.size(); ++i) { current = (string) states[i]; stateToIndex[current] = i; } List commKernel = commClassesKernel(transitions); LogicalVector closed = commKernel["closed"]; CharacterVector transient = computeTransientStates(states, closed); CharacterVector recurrent = computeRecurrentStates(states, closed); vector transientIndxs, recurrentIndxs; // Compute the indexes of the matrix which correspond to transient and recurrent states for (auto state : transient) { current = (string) state; transientIndxs.push_back(stateToIndex[current]); } for (auto state : recurrent) { current = (string) state; recurrentIndxs.push_back(stateToIndex[current]); } int m = transitions.ncol(); int n = transientIndxs.size(); if (n == 0) stop("Markov chain does not have transient states, method not applicable"); // Get the indices in arma::uvec s uvec transientIndices(transientIndxs); uvec recurrentIndices(recurrentIndxs); // Compute N = (1 - Q)^{-1} mat 
probs(transitions.begin(), m, m, true); mat toInvert = eye(n, n) - probs(transientIndices, transientIndices); mat fundamentalMatrix; if (!inv(fundamentalMatrix, toInvert)) stop("Could not compute fundamental matrix"); // Compute the mean absorption probabilities as F* = N*P[transient, recurrent] mat meanProbs = fundamentalMatrix * probs(transientIndices, recurrentIndices); NumericMatrix result = wrap(meanProbs); rownames(result) = transient; colnames(result) = recurrent; if (!byrow) result = transpose(result); return result; } // [[Rcpp::export(.meanFirstPassageTimeRcpp)]] NumericMatrix meanFirstPassageTime(S4 obj, CharacterVector destination) { bool isErgodic = isIrreducible(obj); if (!isErgodic) stop("Markov chain needs to be ergodic (= irreducile) for this method to work"); else { NumericMatrix transitions = obj.slot("transitionMatrix"); mat probs(transitions.begin(), transitions.nrow(), transitions.ncol(), true); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); int numStates = states.size(); NumericMatrix result; if (!byrow) probs = probs.t(); if (destination.size() > 0) { result = computeMeanAbsorptionTimes(probs, destination, states); // This transpose is intentional to return a row always instead of a column result = transpose(result); return result; } else { result = NumericMatrix(numStates, numStates); vec steadyState = steadyStateErgodicMatrix(probs); mat toInvert(numStates, numStates); mat Z; // Compute inverse for (I - P + W), where P = probs, // and W = steadyState pasted row-wise for (int i = 0; i < numStates; ++i) { for (int j = 0; j < numStates; ++j) { toInvert(i, j) = -probs(i, j) + steadyState(j); if (i == j) toInvert(i, i) += 1; } } if (!inv(Z, toInvert)) stop("Problem computing inverse of matrix inside meanFirstPassageTime"); // Set the result matrix for (int j = 0; j < numStates; ++j) { double r_j = 1.0 / steadyState(j); for (int i = 0; i < numStates; ++i) { result(i, j) = (Z(j,j) - Z(i,j)) * r_j; } } 
colnames(result) = states; rownames(result) = states; if (!byrow) result = transpose(result); return result; } } } // [[Rcpp::export(.meanRecurrenceTimeRcpp)]] NumericVector meanRecurrenceTime(S4 obj) { NumericMatrix steady = steadyStates(obj); bool byrow = obj.slot("byrow"); if (!byrow) steady = transpose(steady); CharacterVector states = obj.slot("states"); NumericVector result; CharacterVector recurrentStates; for (int i = 0; i < steady.nrow(); ++i) { for (int j = 0; j < steady.ncol(); ++j) { // This depends on our imlementation of the steady // states, but we have the guarantee that the entry // corresponding to a state in a recurrent class is // only going to be positive in one vector and the // entries corresponding to transient states are // going to be zero if (!approxEqual(steady(i, j), 0)) { result.push_back(1.0 / steady(i, j)); recurrentStates.push_back(states(j)); } } } result.attr("names") = recurrentStates; return result; } // [[Rcpp::export(.minNumVisitsRcpp)]] NumericMatrix meanNumVisits(S4 obj) { NumericMatrix hitting = hittingProbabilities(obj); CharacterVector states = obj.slot("states"); bool byrow = obj.slot("byrow"); if (!byrow) hitting = transpose(hitting); int n = hitting.ncol(); bool closeToOne; double inverse; NumericMatrix result(n, n); rownames(result) = states; colnames(result) = states; // Lets call the matrix of hitting probabilities as f // Then mean number of visits from i to j are given by // f_{ij} / (1 - f_{jj}) // having care when f_{ij} -> mean num of visits is zero // and when f_{ij} > 0 and f_{jj} = 1 -> infinity mean // num of visits for (int j = 0; j < n; ++j) { closeToOne = approxEqual(hitting(j, j), 1); if (!closeToOne) inverse = 1 / (1 - hitting(j, j)); for (int i = 0; i < n; ++i) { if (hitting(i, j) == 0) result(i, j) = 0; else { if (closeToOne) result(i, j) = R_PosInf; else result(i, j) = hitting(i, j) * inverse; } } } if (!byrow) result = transpose(result); return result; } 
markovchain/src/classesAndMethods.cpp0000644000176200001440000000073615137702633017470 0ustar liggesusers// [[Rcpp::depends(RcppArmadillo)]] #include #include #include #include using namespace Rcpp; using namespace arma; using namespace std; // TODO meaning of this method // [[Rcpp::export(.isGenRcpp)]] bool isGen(NumericMatrix gen) { for (int i = 0; i < gen.nrow(); i++) for (int j = 0; j < gen.ncol(); j++) if ((i == j && gen(i, j) > 0) || (i != j && gen(i, j) < 0)) return false; return true; } markovchain/NAMESPACE0000644000176200001440000000477415137702633014016 0ustar liggesusersexport("name<-") export(ExpectedTime) export(absorptionProbabilities) export(assessOrder) export(assessStationarity) export(committorAB) export(createSequenceMatrix) export(ctmcFit) export(expectedRewards) export(expectedRewardsBeforeHittingA) export(firstPassage) export(firstPassageMultiple) export(fitHighOrderMultivarMC) export(fitHigherOrder) export(freq2Generator) export(generatorToTransitionMatrix) export(impreciseProbabilityatT) export(inferHyperparam) export(is.CTMCirreducible) export(is.TimeReversible) export(markovchainFit) export(markovchainListFit) export(markovchainSequence) export(meanAbsorptionTime) export(meanFirstPassageTime) export(meanRecurrenceTime) export(multinomialConfidenceIntervals) export(name) export(noofVisitsDist) export(period) export(predictHommc) export(predictiveDistribution) export(priorDistribution) export(probabilityatT) export(rctmc) export(rmarkovchain) export(seq2freqProb) export(seq2matHigh) export(states) export(transition2Generator) export(verifyEmpiricalToTheoretical) export(verifyHomogeneity) export(verifyMarkovProperty) exportClasses(HigherOrderMarkovChain) exportClasses(ctmc) exportClasses(hommc) exportClasses(markovchain) exportClasses(markovchainList) exportMethods("!=") exportMethods("*") exportMethods("==") exportMethods("[") exportMethods("[[") exportMethods("^") exportMethods(absorbingStates) exportMethods(canonicForm) 
exportMethods(coerce) exportMethods(communicatingClasses) exportMethods(conditionalDistribution) exportMethods(dim) exportMethods(hittingProbabilities) exportMethods(is.accessible) exportMethods(is.irreducible) exportMethods(is.regular) exportMethods(meanNumVisits) exportMethods(plot) exportMethods(predict) exportMethods(print) exportMethods(recurrentClasses) exportMethods(recurrentStates) exportMethods(show) exportMethods(steadyStates) exportMethods(summary) exportMethods(t) exportMethods(transientClasses) exportMethods(transientStates) exportMethods(transitionProbability) import(Matrix) import(igraph) import(methods) import(parallel) importFrom(Rcpp,evalCpp) importFrom(RcppParallel,RcppParallelLibs) importFrom(expm,"%^%") importFrom(expm,logm) importFrom(grDevices,colors) importFrom(stats,aggregate) importFrom(stats,chisq.test) importFrom(stats,pchisq) importFrom(stats,predict) importFrom(stats,rexp) importFrom(stats,sd) importFrom(stats4,plot) importFrom(stats4,summary) importFrom(utils,packageDescription) useDynLib(markovchain, .registration = TRUE)markovchain/LICENSE0000644000176200001440000000006315137702633013567 0ustar liggesusersYEAR: 2023 COPYRIGHT HOLDER: markovchain authors markovchain/NEWS.md0000644000176200001440000001642715137702633013673 0ustar liggesusers# markovchain 0.10.3 Handled _R_CHECK_PACKAGES_USED_IN_DEMO_ # markovchain 0.10.2 # markovchain 0.10.1 # markovchain 0.10.0 - Uptick pandoc requirements and handling of sparse matrices # News for version 0.9.6 - Handling change of pandoc requirements # News for version 0.9.5 - Downtick R requirements # News for version 0.9.4 - Corrected strange characters # News for version 0.9.3 - Generalized application of requireNamespace(..., quietly = TRUE) - Other fixes to comply to newer CRAN requirements - Move to MIT license # News for version 0.9.2 - Add RcppParallel flags to PKG_LIBS # News for version 0.9.1 ## Current changes - Uptick Matrics requirements and modified Changelogs ## old changes - 
2022-09-23 0.9.1 Uptick Matrix reqs - 2022-07-01 0.9.0 Bugfix a state classification error in Rcpp - 2022-05-21 0.8.9 Removal of Matlab package dependency - 2021-05-7 0.8.6 Fix a bug in markovchainListFit that made confusion between lists and data.frames - 2020-12-04 0.8.5-2 Fixing unavailable software issues and language glitches - 2020-09-21 0.8.5-1 Coping with etm unavailability - 2020-05-21 0.8.5 Fixed DoF in verify markov property and supported input in compare teorethical - 2020-05-04 0.8.4.1 2022-09-23 0.9.1 - Uptick Matrix reqs 2022-07-01 0.9.0 - Bugfix a state classification error in Rcpp 2022-05-21 - 0.8.9 Removal of Matlab package dependency - 2021-05-7 0.8.6 Fix a bug in markovchainListFit that made confusion between lists and data.frames - 2020-12-04 0.8.5-2 Fixing unavailable software issues and language glitches - 2020-09-21 0.8.5-1 Coping with etm unavailability - 2020-05-21 0.8.5 Fixed DoF in verify markov property and supported input in compare teorethical - 2020-05-04 0.8.4.1 Fixed presentation - 2020-03-16 0.8.4Limiting output lines in vignettes. - 2020-03-15 0.8.3 Add small changes in code to cope with upcoming R 4.0.0 (stringsAsFactor=TRUE in data.frame). - 2019-12-10 0.8.2 Add small changes in code to cope with upcoming R 4.0.0 (no more check class(x)=='matrix') as well as packages' unavailable. - 2019-08-13 0.7.0 Improves performance and refactors `communicatingClasses`, `recurrentClasses`, `transientStates`, `is.irreducible`, `canonicForm`, `summary` and `steadyStates` methods, porting them to C++ whenever possible and improving the algorithmic complexity of the code. Solves a bug with `steadyStates` method. Adds the methods `recurrentStates` and `transientClasses`. Makes the aforementioned methods work on by column Markov chains. Improves tests, adding checking of mathematical structural properties and hundreds of random test cases. 
Fixes documentation for `roxygen` and NAMESPACE file for automatic generation using `devtools::document()` - Bumps Ignacio Cordón as author (ORCID included) 2019-07-01 0.6.9.15 Fixed confidence interval calculation: true confidence intervals are now 1-(1-confidence_interval)/2 Various code refactoring - 09-12-2018 0.6.9.14 Added plot from MmgraphR Added meanFirstPassageTime (thanks to Toni Giorgino) Add orcid Add more warning to Statistical Inference Functions - 12-08-2018 0.6.9.12 Improved Rcpp performance - 20-04-2018 0.6.9.9 Fixed typo in vignette MAP method now works also with lists (issue #141) Fix valgrid error - 14-08-2017 0.6.9.8-1 Added is.TimeReversible function added gm_to_markovchain example - 10-07-2017 0.6.9.5 Added empirical bayesian estimate Various additions from GSOC 2017 (see the new vignette) - 31-03-2014 Version 0.6.9 Added sort method Revised numeric tolerance when creating markovchains Added suggestion for which row to fix - 16-03-2017 Version 0.6.8 Deep restructuring of statistical tests - Add parameter confint to markovchainFit Fixed bug in markovchainFitList - Handling of NA - 02-02-2017 Version 0.6.6.2 Add parameter confint to markovchainFit - 27-01-2017 Version 0.6.6.1 Fixing bug in markovchainListFit - 22-01-2017 markovchainFit accepts an uneven list now Added confidence intervals when markovchainFit is given a matrix - 08-12-2016 Added patch to divergence test - 20-08-2016 Fully parallelized bootstrapped markovchain fit - 08-08-2016 Version 0.6 Added multivariate higher order markov chains Better handlign of steady state analysis on non - recurrent Markov Chains Fixed an error in the igraph conversion - 08-07-2016 Fixed C++ 11 variables types - 24-06-2016 Version 0.4.5 Speeding up rmarkovchain using parallel and RcppParallel library. 
- 14-06-2016 Version 0.4.4.4 Bug fixed for markovchainFit when method = bootstrap - 09-06-2016 Version 0.4.4.2 added sanitize=false paramter to markovchainFit - 31-05-2016 Version 0.4.4.1 Improvement of the internal method checkSequence. name method to set and get the names of markovchain object. - 10-05-2016 Version 0.4.4 rmarkovchain in RCpp (thanks to Deepak and GSOC 2016) Various small fixes - 05-03-2016 Version 0.4.3.1 fixed a bug in the states classification added options to save output of random sampler in a matrix - 10-10-2015 Version 0.4.3 fixed an error in plot function - 08-07-2015 Version 0.3.1 Period to Rcpp (thanks to TAE) communicatingClasses and recurrentClasses (thanks to TAE) Various optimization (thanks to TAE) Initial support for Continuous Time Markov Chains (thanks to SAI) Added new methods: names, != - 15-06-2015 Version 0.3 Added a CrashIntro vignette Most probability function rewritten in Rcpp Added standard errors and confidence intervals for MLE (thanks to Tae) Added confidence intervals for bootstap (thanks to Tae) Added bayesian Maximum A Posteriori estimation (thanks to Sai) - 12-05-2015 Version 0.2.1 Fixed a compatibility issue with R 3 development - 12-04-2015 Version 0.2 This is a milestone for markovchain package, since the package project has been selected within the funded GSOC 2015 projects. Thanks to Tae support now the fitting functions have been rewritten in Rcpp. - 20-03-2015 Version 0.1.3 Fastened the firstpassage time code thanks to Thoralf suggestion - 01-03-2015 Version 0.1.2 Add GitHub project url - 17-02-2015 Version 0.1.1 Fasten markovchain sequence thanks to Mildenberger Thoralf suggestion - 04-01-2015 Version 0.1.0 It is now possible to fit a markovchain and a markovchainList object from a matrix or data.frame Updated vignettes Added tests - 21-06-2014 Version 0.0.9.5 Updated vignettes Added a method to convert a square matrix into a markovchain object. 
- 20-04-2014 Version 0.0.9 Updated vignette Added parallel processing for bootstrap estimation - 09-02-2014 Version 0.0.8 Minor vignette enhancements Added function to find period of a DTMC - 12-01-2014 Version 0.0.7 Deeply improved vignettes Added predict and summary methods Added function to perform probabilistic analysis - 31-12-2013 Version 0.0.5 Improved vignettes Add predict methods Add methods for transitory states - 04-11-2013 Version 0.0.3 Added various method to easily handle markovchain and markovchainList objects Implemented rmarkovchain and bootstrap fit Improved vignettes markovchain/inst/0000755000176200001440000000000015137710471013537 5ustar liggesusersmarkovchain/inst/CITATION0000644000176200001440000000063215137707663014706 0ustar liggesusersnote <- sprintf("R package version %s", meta$Version) bibentry( bibtype = "Article", title = "Discrete Time Markov Chains with R", author = person(given="Giorgio Alfredo", family="Spedicato"), journal = "The R Journal", year = "2017", volume = "9", number = "2", pages = "84--104", url = "https://journal.r-project.org/articles/RJ-2017-036/index.html" ) markovchain/inst/doc/0000755000176200001440000000000015137710471014304 5ustar liggesusersmarkovchain/inst/doc/gsoc_2017_additions.html0000644000176200001440000021000215137710447020632 0ustar liggesusers Google Summer of Code 2017 Additions

Google Summer of Code 2017 Additions

Vandit Jain

August 2017

Expected Hitting Time using CTMC

The package provides ExpectedTime function to calculate average hitting time from one state to another. Let the final state be j, then for every state \(i \in I\), where \(I\) is the set of all possible states and holding time \(q_{i} > 0\) for every \(i \neq j\). Assuming the conditions to be true, expected hitting time is equal to minimal non-negative solution vector \(p\) to the system of linear equations (Norris 1998): \[\begin{equation} \begin{array}{lcr} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{k} = 1 & k \neq j \end{array} \label{eq:EHT} \end{equation}\]

For example, consider the continuous time markovchain which is as follows:

library(markovchain)
states <- c("a","b","c","d")
byRow <- TRUE
gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0),
nrow = 4,byrow = byRow, dimnames = list(states,states))
ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc")

The generator matrix of the ctmc is: \[ M = \left(\begin{array}{cccc} -1 & 1/2 & 1/2 & 0\\ 1/4 & -1/2 & 1/4 & 1/6\\ 1/6 & 0 & -1/3 & 1/6\\ 0 & 0 & 0 & 0 \end{array}\right) \]

Now if we have to calculate the expected hitting time the process will take to hit state \(d\) if we start from \(a\), we apply the \(ExpectedTime\) function. The \(ExpectedTime\) function takes four inputs, namely a \(ctmc\) class object, the initial state \(i\), the final state \(j\) for which we have to calculate the expected hitting time, and a logical parameter indicating whether to use the RCpp implementation. By default, the function uses RCpp as it is faster and takes less time.

ExpectedTime(ctmc,1,4)
#> [1] 7

We find that the expected hitting time for process to be hit state \(d\) is 7 units in this case.

Calculating Probability at time T using ctmc

The package provides a function probabilityatT to calculate probability of every state according to given ctmc object. The Kolmogorov’s backward equation gives us a relation between transition matrix at any time t with the generator matrix(Dobrow 2016):

\[\begin{equation} P'(t) = QP(t) \end{equation}\]

Here we use the solution of this differential equation \(P(t) = P(0)e^{tQ}\) for \(t \geq 0\) and \(P(0) = I\). In this equation, \(P(t)\) is the transition function at time t. The value \(P(t)[i][j]\) at time \(P(t)\) describes the conditional probability of the state at time \(t\) to be equal to j if it was equal to i at time \(t=0\). It takes care of the case when ctmc object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix \(P(t)\).

Also worth mentioning is that the function is also implemented using RCpp, which can be used to lessen the time of computation. It is used by default. Next, we consider both the case where the initial state is given and the case where the initial state is not given.

In the first case, the function takes two inputs, first of them is an object of the S4 class ‘ctmc’ and second is the final time \(t\).

probabilityatT(ctmc,1)
#>            a          b         c          d
#> a 0.41546882 0.24714119 0.2703605 0.06702946
#> b 0.12357060 0.63939068 0.0348290 0.20220972
#> c 0.09012017 0.02321933 0.7411205 0.14553997
#> d 0.00000000 0.00000000 0.0000000 1.00000000

Here we get an output in the form of a transition matrix.

If we take the second case i.e. considering some initial input:

probabilityatT(ctmc,1,1)
#> [1] 0.41546882 0.24714119 0.27036052 0.06702946

In this case we get the probabilities corresponding to every state. this also includes probability that the process hits the same state \(a\) after time \(t=1\).

Plotting generator matrix of continuous-time markovchains

The package provides a plot function for plotting a generator matrix \(Q\) in the form of a directed graph where every possible state is assigned a node. Edges connecting these nodes are weighted. Weight of the edge going from a state \(i\) to state \(j\) is equal to the value \(Q_{ij}\). This gives a picture of the generator matrix.

For example, we build a ctmc-class object to plot it.

energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3,
                       1, -1), nrow = 2,
              byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates, 
                 byrow = byRow, generator = gen, 
                 name = "Molecular Transition Model")    

Now if we plot this function we get the following graph:

plot(molecularCTMC)
#> Warning: Non-positive edge weight found, ignoring all weights during graph
#> layout.

The figure shown is built using the \(igraph\) package. The package also provides options of plotting graph using \(diagram\) and \(DiagrameR\) package. Plot using these packages can be built using these commands:

if(requireNamespace(package='ctmcd', quietly = TRUE)) {
plot(molecularCTMC,package = "diagram")
} else {
  print("diagram package unavailable")
}

Similarly, one can easily replace \(diagram\) package with \(DiagrammeR\).

Imprecise Continuous-Time Markov chains

Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous time markovchains takes into consideration a number of assumptions which may not be realistic for the domain of application; in particular, the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMC into consideration.

More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models.

Types of ICTMCs

For any non-empty bounded set of rate matrices \(L\), and any non-empty set \(M\) of probability mass functions on \(X\), we define the following three sets of stochastic processes that are jointly consistent with \(L\) and \(M\):

  • \(P^{W}_{L,M}\) is the consistent set of all well-behaved stochastic processes;
  • \(P^{WM}_{L,M}\) is the consistent set of all well-behaved Markov chains;
  • \(P^{WHM}_{L,M}\) is the consistent set of all well-behaved homogeneous Markov chains(Thomas Krak 2017).

From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs.

Lower Transition Rate Operators for ICTMCs

A map \(Q_{l}\) from \(L(X)\) to \(L(X)\) is called a lower transition rate operator if, for all \(f,g \in L(X)\), all \(\lambda \in R_{\geq 0}\), all \(\mu \in L(X)\), and all \(x \in X\)(Thomas Krak 2017):

  1. \([Q_{l}m](x) = 0\)
  2. \([Q_{l}I](x) \geq 0 \forall y \in X\) such that \(x \neq y\)
  3. \([Q_{l}(f + g)](x)\geq [Q_{l}f](x) + [Q_{l}g](x)\)
  4. \([Q_{l}(l f)](x) = \lambda Q_{l}f[(x)]\)

Lower Transition Operators

A map \(T_{l}\) from \(L (X )\) to \(L (X )\) is called a lower transition operator if, for all \(f,g \in L(X)\), all \(\lambda \in R_{\geq 0}\), all \(\mu \in L(X)\), and all \(x \in X\)(Thomas Krak 2017):

  1. \([T_{l} f](x) \geq min(f(y) : y \in L)\)
  2. \([T_{l}(f +g)](x) \geq [T_{l} f](x)+[T_{l}g](x)\)
  3. \([T_{l}(\lambda f)](x) = l [T_{l} f](x)\)

ImpreciseprobabilityatT function

Now I would like to come onto the practical purpose of using ICTMC classes. ICTMC classes in this package are defined to represent a generator that is defined in such a way that every row of the generator corresponding to every state in the process is governed by a separate variable. As defined earlier, an imprecise continuous time markovchain is a set of many precise CTMCs. Hence this representation of a set of precise CTMCs can be used to calculate the transition probability at some time in the future. This can be seen as an analogy with the probabilityatT function. It is used to calculate the transition function at some later time t using the generator matrix.

For every generator matrix, we have a corresponding transition function. Similarly, for every Lower Transition rate operator of an ICTMC, we have a corresponding lower transition operator denoted by \(L_{t}^{s}\). Here \(t\) is the initial time and \(s\) is the final time.

Now we mention a proposition(Thomas Krak 2017) which states that: Let \(Q_{l}\) be a lower transition rate operator, choose any time \(t\) and \(s\) both greater than 0 such that \(t \leq s\), and let \(L_{t}^{s}\) be the lower transition operator corresponding to \(Q_{l}\). Then for any \(f \in L(X)\) and \(\epsilon \in R_{>0}\), if we choose any \(n \in N\) such that:

\[n \geq max((s-t)*||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\]

with \(||f||_{v}\) := max \(f\) - min \(f\), we are guaranteed that(Thomas Krak 2017)

\[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \]

with \(\Delta := \frac{s-t}{n}\)

Simple put this equation tells us that, using \(Q_{l}g\) for all \(g \in L(X)\) then we can also approximate the quantity \(L_{t}^{s}\) to arbitrary precision, for any given \(f \in L(X)\).

To explain this approximate calculation, I would take a detailed example of a process containing two states healthy and sick, hence \(X = (healthy,sick)\). If we represent in form of an ICTMC, we get:

\[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) \]

for some \(a,b \in R_{\geq 0}\). The parameter \(a\) here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time \(t\), the probability that he or she will be sick at time \(t +\Delta\), for small \(\Delta\), is very close to \(\Delta a\). More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after \(\frac{1}{a}\) weeks. The parameter \(b\) is the rate at which a sick person becomes healthy again, and has a similar interpretation.

Now to completely represent the ICTMC we take an example and write the generator as:

\[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}],b \in [\frac{1}{2},2] \]

Now suppose we know the initial state of the patient to be sick, hence this is represented in the form of a function by: \[ I_{s} = \left(\begin{matrix} 0 \\ 1 \end{matrix}\right) \] We observe that \(||I_{s}|| = 1\). Now to use the proposition mentioned above, we use the definition to calculate the lower transition operator \(Q_{l}\). Next we calculate the norm of the lower transition rate operator and use it in the proposition. Also, we take the value of \(\epsilon\) to be 0.001.

Using the proposition, we can come up with an algorithm for calculating the probability at any time \(s\) given the state at initial time \(t\) and an ICTMC generator(Thomas Krak 2017).

The algorithm is as follows:

Input: A lower transition rate operator \(Q\), two time points \(t,s\) such that \(t \leq s\), a function \(f \in L(X )\) and a maximum numerical error \(\epsilon \in R_{>0}\).

Algorithm:

  1. \(n = max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\)
  2. \(\Delta = \frac{s-t}{n}\)
  3. \(g_{0} = I_{s}\)
  4. for \(i \in (1,.....,n)\) do \(g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}\)
  5. end for
  6. return \(g_{n}\)

Output:

The conditional probability vector after time \(t\) with error \(\epsilon\). Hence, after applying the algorithm on above example we get the following result:

$ g_{n} = 0.0083$ if final state is \(healthy\) and \(g_{n} = 0.141\) if the final state is \(sick\). The probability calculated is with an error equal to \(\epsilon\) i.e. \(0.001\).

Now we run the algorithm on the example through R code.

states <- c("n","y")
Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states))
range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2)
name <- "testictmc"
ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name)
impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE)
#> [1] 0.008259774 0.140983476

The probabilities we get are with an error of \(10^{-3}\)

Continuous time markovchain generator using frequency Matrix

The package provides the freq2Generator function. It takes in a matrix representing relative frequency values, along with the time taken, to provide a continuous time markovchain generator matrix. Here, the frequency matrix is a 2-D matrix of dimensions equal to the number of possible states, describing the number of transitions from a state \(i\) to \(j\) in time \(t\), which is another parameter to be provided to the function. The function also allows choosing among three methods for calculation of the generator matrix (Alexander Kreinin 2001). It requires the (Pfeuffer 2017) package.

Three methods are as follows:

  1. Quasi Optimization - “QO”
  2. Diagonal Adjustment - “DA”
  3. Weighted Adjustment - “WA”

See reference for details about the methods.

Here is an example matrix on which freq2Generator function is run:

if(requireNamespace(package='ctmcd', quietly = TRUE)) {
sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE)
sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) 
freq2Generator(sample_rel,1)
} else {
  print('ctmcd unavailable')
}
#>              [,1]        [,2]         [,3] [,4]
#> [1,] -0.024212164  0.01544797  0.008764198    0
#> [2,]  0.006594821 -0.01822834  0.011633520    0
#> [3,]  0.013302567  0.00749703 -0.020799597    0
#> [4,]  0.000000000  0.00000000  0.000000000    0

Committor of a markovchain

Consider sets of states A and B comprising states from a markovchain with transition matrix P. The committor vector of a markovchain with respect to sets A and B gives the probability that the process will hit a state from set A before any state from set B.

Committor vector u can be calculated by solving the following system of linear equations(StackOverflow 2015):

\[ \begin{array}{l} Lu(x) = 0, x \notin A \cup B \\ u(x) = 1, x \in A \\ u(x) = 0, x \in B \end{array} \] where \(L = P -I\).

Now we apply the method to an example:

transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5)
object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc")
committorAB(object,c(5),c(3))

Here we get probability that the process will hit state “e” before state “c” given different initial states.

First Passage probability for set of states

Currently, computation of the first passage time for individual states has been implemented in the package. The firstPassageMultiple function provides a method to get the first passage probability for a given set of states.

Consider this example markovchain object:

statesNames <- c("a", "b", "c")
testmarkov <- new("markovchain", states = statesNames, transitionMatrix =
matrix(c(0.2, 0.5, 0.3,
0.5, 0.1, 0.4,
0.1, 0.8, 0.1), nrow = 3, byrow = TRUE,
dimnames = list(statesNames, statesNames)
))

Now we apply firstPassageMultiple function to calculate first passage probabilities for set of states \("b", "c"\) when initial state is \("a"\).

firstPassageMultiple(testmarkov,"a",c("b","c"),4)
#>      set
#> 1 0.8000
#> 2 0.6000
#> 3 0.2540
#> 4 0.1394

This shows us the probability that the process will hit any of the state from the set after n number of steps for instance, as shown, the probability of the process to hit any of the states among \("b", "c"\) after \(2\) steps is \(0.6000\).

Joint PDF of number of visits to the various states of a markovchain

The package provides a function noofVisitsDist that returns the PDF of the number of visits to the various states of the discrete time markovchain during the first N steps, given initial state of the process.

We will take an example to see how to use the function on a markovchain-class object:

transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
              transitionMatrix=transMatr, 
              name="simpleMc")   
noofVisitsDist(simpleMc,5,"a")
#>        a        b 
#> 0.348148 0.651852

The output clearly shows the probabilities related to various states of the process.

Expected Rewards for a markovchain

The package provides a function expectedRewards that returns a vector of expected rewards for different initial states. The user provides reward values, a vector \(r\) of size equal to number of states having a value corresponding to every state. Given a transition matrix \([P]\), we get the vector of expected rewards \(v\) after \(n\) transitions according to the equation as follows(Gallager 2013):

\(v[n] = r + [P]*v[n-1]\)

Applying this equation on a markovchain-class object

transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE)
simpleMc<-new("markovchain", states=c("a","b"),
             transitionMatrix=transMatr)
expectedRewards(simpleMc,1,c(0,1))
#> [1] 0.01 1.99

Expected Rewards for a set of states in a markovchain process

The package provides a function expectedRewardsBeforeHittingA that returns the value of expected first passage rewards \(E\) given rewards corresponding to every state, an initial state. This means the function returns expected reward for given initial state \(s_{0}\), number of transitions \(n\) and for a set of states \(A\) with a constraint such that the process does not hit any of the states that belong to state \(A\). \(S\) is the set of all possible states.

The function uses an equation which is as follows:

\[E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}\] here \(1_{s_{0}} = [0,0,...0,1,0,...,0,0,0]\), 1 being on \(s_{0}\) position and \(R_{S-A}\) being the rewards vector for \(S-A\) state.

Checking Irreducibly of a CTMC

The package provides a function is.CTMCirreducible that returns a Boolean value stating whether the ctmc object is irreducible. We know that a continuous time markovchain is irreducible if and only if its embedded chain is irreducible(Sigman 2009).

We demonstrate an example running the function:

energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3,
                      1, -1), nrow = 2,
             byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates, 
                    byrow = byRow, generator = gen, 
                    name = "Molecular Transition Model")
is.CTMCirreducible(molecularCTMC)
#> [1] TRUE

Simulation of Higher Order Multivariate Markovchains

The package provides the predictHommc function. This function provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the jth sequence at time \(r+1\) depends on the state probability distribution of all the sequences at n previous moments of time, i.e. \(t = r\) to \(t = r-n+1\). Hence the proposed model takes the form mathematically as:(Ching, Ng, and Fung 2008)

\[ X_{r+1}^{j} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,....s, \ \ r = n-1,n,... \] with initals \(X_{0}^{(k)},X_{1}^{(k)},......,X_{n-1}^{(k)} \ (k = 1,2,...s)\). Here,

\(\lambda_{jk}^{(k)}, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n \ \ \ and \ \ \ \sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)} = 1}, \ \ \ j = 1,2,....s.\)

Now we run an example on sample hommc object for simulating next 3 steps using predictHommc function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states.

if (requireNamespace("Rsolnp", quietly = TRUE)) {
statesName <- c("a", "b")
P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName))
P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2)
P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2)
P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2)
P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2)
Lambda <- c(0.8, 0.2, 0.3, 0.7)
ob <- new("hommc", order = 1, states = statesName, P = P, 
         Lambda = Lambda, byrow = FALSE, name = "FOMMC")
predictHommc(ob,3)
} else {
  print("Rsolnp unavailable")
}
#>      [,1] [,2] [,3]
#> [1,] "a"  "b"  "a" 
#> [2,] "a"  "a"  "a"

Check Time Reversibility of Continuous-time markovchains

A Continuous-time markovchain with generator \(Q\) and stationary distribution \(\pi\) is said to be time reversible if:(Dobrow 2016)

\[ \pi_{i}q_{ij} = \pi_{j}q_{ji} \]

Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states i and j, the long-term forward transition rate from i to j is equal to the long-term backward rate from j to i.

The package provides is.TimeReversible function to check if a ctmc object is time-reversible. We follow with an example run on a ctmc object.

energyStates <- c("sigma", "sigma_star")
byRow <- TRUE
gen <- matrix(data = c(-3, 3,
                        1, -1), nrow = 2,
             byrow = byRow, dimnames = list(energyStates, energyStates))
molecularCTMC <- new("ctmc", states = energyStates, 
                    byrow = byRow, generator = gen, 
                    name = "Molecular Transition Model")
is.TimeReversible(molecularCTMC)
#> [1] TRUE

References

Alexander Kreinin, Marina Sidelnikova. 2001. “Regularization Algorithms for Transition Matrices.” Algo Research Quarterly 4 (1/2): 23–40.
Ching, Wai-Ki, Michael K Ng, and Eric S Fung. 2008. “Higher-Order Multivariate Markov Chains and Their Applications.” Linear Algebra and Its Applications 428 (2): 492–507.
Dobrow, Robert P. 2016. Introduction to Stochastic Processes with r. John Wiley & Sons.
Gallager, Robert G. 2013. Stochastic Processes: Theory for Applications. Cambridge University Press.
Norris, J. R. 1998. Markovchains. Cambridge University Press.
Pfeuffer, Marius. 2017. Ctmcd: Estimating the Parameters of a Continuous-Time Markov Chain from Discrete-Time Data. https://CRAN.R-project.org/package=ctmcd.
Sigman, Karl. 2009. “Continuous Time Markovchains.” Columbia University.
StackOverflow. 2015. https://math.stackexchange.com/questions/1450399/probability-that-a-chain-will-enter-state-5-before-it-enters-state-3?newreg=82f90b66b949495a91661caad24db915.
Thomas Krak, Arno Siebes, Jasper De Bock. 2017. “Imprecise Continuous Time Markov Chains.” International Journal of Approximate Reasoning 88: 452–528.
markovchain/inst/doc/higher_order_markov_chains.html0000644000176200001440000012117015137710470022540 0ustar liggesusers Higher order Markov chains

Higher order Markov chains

Deepak Yadav

B-Tech student, Computer Science and Engineering

Tae Seung Kang

Ph.D student, Computer & Information Science & Engineering

Giorgio Alfredo Spedicato

Ph.D FCAS FSA CSPA C.Stat, Unipol Group

Abstract

The package contains functions to fit higher-order (possibly multivariate) Markov chains. The functions are shown together with simple examples.

Higher Order Markov Chains

Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package.

An experimental fitHigherOrder function has been written in order to fit a higher order Markov chain (Ching, Ng, and Fung (2008)). fitHigherOrder takes two inputs

  1. sequence: a categorical data sequence.
  2. order: order of Markov chain to fit with default value 2.

The output will be a list which consists of

  1. lambda: model parameter(s).
  2. Q: a list of transition matrices. \(Q_i\) is the \(ith\) step transition matrix stored column-wise.
  3. X: frequency probability vector of the given sequence.

Its quadratic programming problem is solved using solnp function of the Rsolnp package (Ghalanos and Theussl 2014).

if (requireNamespace("Rsolnp", quietly = TRUE)) {
library(Rsolnp)
data(rain)
fitHigherOrder(rain$rain, 2)
fitHigherOrder(rain$rain, 3)
}
#> $lambda
#> [1] 0.3333333 0.3333333 0.3333333
#> 
#> $Q
#> $Q[[1]]
#>             0       1-5        6+
#> 0   0.6605839 0.4625850 0.1976285
#> 1-5 0.2299270 0.3061224 0.3122530
#> 6+  0.1094891 0.2312925 0.4901186
#> 
#> $Q[[2]]
#>             0       1-5        6+
#> 0   0.6021898 0.4489796 0.3412698
#> 1-5 0.2445255 0.2687075 0.3214286
#> 6+  0.1532847 0.2823129 0.3373016
#> 
#> $Q[[3]]
#>             0       1-5        6+
#> 0   0.5693431 0.4455782 0.4183267
#> 1-5 0.2536496 0.2891156 0.2749004
#> 6+  0.1770073 0.2653061 0.3067729
#> 
#> 
#> $X
#>         0       1-5        6+ 
#> 0.5000000 0.2691606 0.2308394

Higher Order Multivariate Markov Chains

Introduction

The HOMMC model is used for modeling the behaviour of multiple categorical sequences generated by similar sources. The main reference is (Ching, Ng, and Fung 2008). Assume that there are s categorical sequences, each with possible states in M. In an nth order MMC, the state probability distribution of the jth sequence at time \(t = r + 1\) depends on the state probability distributions of all the sequences (including itself) at times \(t = r, r - 1, ..., r - n + 1\).

\[ x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ... \]

with initial distribution \(x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)\). Here

\[ \lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace and \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s. \]

Now we will see the simpler representation of the model which will help us understand the result of fitHighOrderMultivarMC method.

Let \(X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T} for \enspace j = 1, 2, 3, ... , s.\) Then

\[ \begin{pmatrix} X_{r+1}^{(1)}\\ X_{r+1}^{(2)}\\ .\\ .\\ .\\ X_{r+1}^{(s)} \end{pmatrix} = \begin{pmatrix} B^{11}& B^{12}& .& .& B^{1s}& \\ B^{21}& B^{22}& .& .& B^{2s}& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ B^{s1}& B^{s2}& .& .& B^{ss}& \\ \end{pmatrix} \begin{pmatrix} X_{r}^{(1)}\\ X_{r}^{(2)}\\ .\\ .\\ .\\ X_{r}^{(s)} \end{pmatrix} \textrm{where} \]

\[B^{ii} = \begin{pmatrix} \lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\ I& 0& .& .& 0& \\ 0& I& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& I& 0& \end{pmatrix}_{mn*mn} \textrm{and} \]

\[ B^{ij} = \begin{pmatrix} \lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\ 0& 0& .& .& 0& \\ 0& 0& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& 0& 0& \end{pmatrix}_{mn*mn} \textrm{when } i\neq j. \]

Representation of parameters in the code

\(P_{h}^{(ij)}\) is represented as \(Ph(i,j)\) and \(\lambda _{ij}^{(h)}\) as Lambdah(i,j). For example: \(P_{2}^{(13)}\) as \(P2(1,3)\) and \(\lambda _{45}^{(3)}\) as Lambda3(4,5).

Definition of HOMMC class

showClass("hommc")
#> Class "hommc" [package "markovchain"]
#> 
#> Slots:
#>                                                                   
#> Name:      order    states         P    Lambda     byrow      name
#> Class:   numeric character     array   numeric   logical character

Any element of hommc class is comprised by following slots:

  1. states: a character vector, listing the states for which transition probabilities are defined.
  2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column.
  3. order: order of Multivariate Markov chain.
  4. P: an array of all transition matrices.
  5. Lambda: a vector to store the weightage of each transition matrix.
  6. name: optional character element to name the HOMMC

How to create an object of class HOMMC

states <- c('a', 'b')
P <- array(dim = c(2, 2, 4), dimnames = list(states, states))
P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2)

P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2)

P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2)

P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2)

Lambda <- c(.8, .2, .3, .7)

hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, 
           byrow = FALSE, name = "FOMMC")
hob
#> Order of multivariate markov chain = 1 
#> states = a b 
#> 
#> List of Lambda's and the corresponding transition matrix (by cols) :
#> Lambda1(1,1) : 0.8
#> P1(1,1) : 
#>           a b
#> a 0.3333333 1
#> b 0.6666667 0
#> 
#> Lambda1(1,2) : 0.2
#> P1(1,2) : 
#>   a b
#> a 0 1
#> b 1 0
#> 
#> Lambda1(2,1) : 0.3
#> P1(2,1) : 
#>           a b
#> a 0.6666667 0
#> b 0.3333333 1
#> 
#> Lambda1(2,2) : 0.7
#> P1(2,2) : 
#>     a   b
#> a 0.5 0.5
#> b 0.5 0.5

Fit HOMMC

fitHighOrderMultivarMC method is available to fit HOMMC. Below are the 3 parameters of this method.

  1. seqMat: a character matrix or a data frame, each column represents a categorical sequence.
  2. order: order of Multivariate Markov chain. Default is 2.
  3. Norm: Norm to be used. Default is 2.

A Marketing Example

We tried to replicate the example found in (Ching, Ng, and Fung 2008) for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in a state of overflow or near capacity. The company is thus in urgent need to study the interplay between the storage space requirement and the overall growing sales demand. The products can be classified into six possible states (1, 2, 3, 4, 5, 6) according to their sales volumes. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data is contained in the sales object.

data(sales)
head(sales)
#>      A   B   C   D   E  
#> [1,] "6" "1" "6" "6" "6"
#> [2,] "6" "6" "6" "2" "2"
#> [3,] "6" "6" "6" "2" "2"
#> [4,] "6" "1" "6" "2" "2"
#> [5,] "2" "6" "6" "2" "2"
#> [6,] "6" "1" "6" "3" "3"

The company would also like to predict sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. The data consist of the customer’s sales demand sequences for five important products of the company over one year. We expect sales demand sequences generated by the same customer to be correlated with each other. Therefore, by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, and hence obtain better prediction rules.

In the application in (Ching, Ng, and Fung 2008), the order is arbitrarily chosen to be eight, i.e., n = 8. We first estimate all the transition probability matrices \(P_{h}^{(ij)}\) and we also obtain the estimates of the stationary probability distributions of the five products:

\(\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}\)

\(\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}\)

\(\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}\)

\(\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}\)

\(\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}\)

By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model:

\(\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}\)

\(\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r}^{(2)}\)

\(\boldsymbol{x}_{r+1}^{(3)} = \boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}\)

\(\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r}^{(5)}\)

\(\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r}^{(5)}\)

According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, Products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Products D and E depend considerably on Product B at order 8; this relationship is hard to obtain in a conventional Markov model owing to the huge number of parameters. The results show that the higher-order multivariate Markov model is quite useful for analyzing the relationships among sales demands.

# fit 8th order multivariate markov chain
if (requireNamespace("Rsolnp", quietly = TRUE)) {
object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2)
}

We choose to show only results shown in the paper. We see that \(\lambda\) values are quite close, but not equal, to those shown in the original paper.

#> Order of multivariate markov chain = 8 
#> states = 1 2 3 4 5 6 
#> 
#> List of Lambda's and the corresponding transition matrix (by cols) :
#> Lambda1(1,2) : 0.9999989
#> P1(1,2) : 
#>            1         2         3         4 5          6
#> 1 0.06060606 0.1509434 0.0000000 0.1666667 0 0.07547170
#> 2 0.44444444 0.4716981 0.4444444 0.1666667 1 0.33018868
#> 3 0.01010101 0.1320755 0.2222222 0.1666667 0 0.02830189
#> 4 0.01010101 0.0754717 0.2222222 0.1666667 0 0.01886792
#> 5 0.01010101 0.0000000 0.0000000 0.1666667 0 0.00000000
#> 6 0.46464646 0.1698113 0.1111111 0.1666667 0 0.54716981
#> 
#> Lambda1(2,2) : 0.4771666
#> P1(2,2) : 
#>            1          2         3         4 5           6
#> 1 0.40404040 0.20754717 0.0000000 0.1666667 1 0.433962264
#> 2 0.11111111 0.47169811 0.3333333 0.1666667 0 0.132075472
#> 3 0.02020202 0.05660377 0.3333333 0.1666667 0 0.009433962
#> 4 0.00000000 0.00000000 0.0000000 0.1666667 0 0.000000000
#> 5 0.00000000 0.00000000 0.1111111 0.1666667 0 0.000000000
#> 6 0.46464646 0.26415094 0.2222222 0.1666667 0 0.424528302
#> 
#> Lambda3(2,2) : 0.3882533
#> P3(2,2) : 
#>            1          2         3         4 5          6
#> 1 0.40404040 0.16981132 0.3333333 0.1666667 0 0.44230769
#> 2 0.18181818 0.33962264 0.2222222 0.1666667 0 0.14423077
#> 3 0.03030303 0.05660377 0.0000000 0.1666667 0 0.02884615
#> 4 0.00000000 0.00000000 0.0000000 0.1666667 0 0.00000000
#> 5 0.00000000 0.00000000 0.1111111 0.1666667 0 0.00000000
#> 6 0.38383838 0.43396226 0.3333333 0.1666667 1 0.38461538
#> 
#> Lambda1(3,5) : 0.672876
#> P1(3,5) : 
#>           1          2         3         4          5          6
#> 1 0.1666667 0.09473684 0.1515152 0.1639344 0.07142857 0.21538462
#> 2 0.1666667 0.18947368 0.2727273 0.2295082 0.14285714 0.18461538
#> 3 0.1666667 0.04210526 0.0000000 0.0000000 0.00000000 0.01538462
#> 4 0.1666667 0.00000000 0.0000000 0.0000000 0.00000000 0.00000000
#> 5 0.1666667 0.01052632 0.0000000 0.0000000 0.00000000 0.00000000
#> 6 0.1666667 0.66315789 0.5757576 0.6065574 0.78571429 0.58461538
#> 
#> Lambda8(4,2) : 0.2745626
#> P8(4,2) : 
#>            1          2         3         4 5          6
#> 1 0.00000000 0.00000000 0.0000000 0.1666667 0 0.00000000
#> 2 0.34343434 0.18867925 0.6666667 0.1666667 0 0.42424242
#> 3 0.10101010 0.16981132 0.0000000 0.1666667 1 0.14141414
#> 4 0.20202020 0.22641509 0.1111111 0.1666667 0 0.17171717
#> 5 0.08080808 0.09433962 0.1111111 0.1666667 0 0.03030303
#> 6 0.27272727 0.32075472 0.1111111 0.1666667 0 0.23232323
#> 
#> Lambda1(4,5) : 0.2310635
#> P1(4,5) : 
#>           1          2          3          4          5          6
#> 1 0.1666667 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
#> 2 0.1666667 0.47368421 0.21212121 0.03278689 0.00000000 0.64615385
#> 3 0.1666667 0.10526316 0.21212121 0.19672131 0.07142857 0.09230769
#> 4 0.1666667 0.00000000 0.24242424 0.54098361 0.57142857 0.03076923
#> 5 0.1666667 0.01052632 0.03030303 0.18032787 0.28571429 0.00000000
#> 6 0.1666667 0.41052632 0.30303030 0.04918033 0.07142857 0.23076923
#> 
#> Lambda2(4,5) : 0.298032
#> P2(4,5) : 
#>           1          2          3          4          5          6
#> 1 0.1666667 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
#> 2 0.1666667 0.55319149 0.36363636 0.06557377 0.00000000 0.41538462
#> 3 0.1666667 0.13829787 0.09090909 0.21311475 0.28571429 0.04615385
#> 4 0.1666667 0.05319149 0.24242424 0.40983607 0.64285714 0.06153846
#> 5 0.1666667 0.02127660 0.06060606 0.16393443 0.07142857 0.03076923
#> 6 0.1666667 0.23404255 0.24242424 0.14754098 0.00000000 0.44615385
#> 
#> Lambda8(5,2) : 0.2303975
#> P8(5,2) : 
#>            1          2         3         4 5          6
#> 1 0.00000000 0.00000000 0.0000000 0.1666667 0 0.00000000
#> 2 0.35353535 0.20754717 0.6666667 0.1666667 1 0.39393939
#> 3 0.10101010 0.15094340 0.0000000 0.1666667 0 0.13131313
#> 4 0.22222222 0.30188679 0.2222222 0.1666667 0 0.20202020
#> 5 0.09090909 0.03773585 0.0000000 0.1666667 0 0.03030303
#> 6 0.23232323 0.30188679 0.1111111 0.1666667 0 0.24242424
#> 
#> Lambda1(5,4) : 0.2263456
#> P1(5,4) : 
#>           1          2          3          4          5          6
#> 1 0.1666667 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
#> 2 0.1666667 0.48421053 0.16666667 0.01960784 0.05882353 0.60869565
#> 3 0.1666667 0.10526316 0.16666667 0.15686275 0.05882353 0.11594203
#> 4 0.1666667 0.00000000 0.44444444 0.62745098 0.64705882 0.02898551
#> 5 0.1666667 0.01052632 0.02777778 0.15686275 0.23529412 0.00000000
#> 6 0.1666667 0.40000000 0.19444444 0.03921569 0.00000000 0.24637681
#> 
#> Lambda2(5,5) : 0.5369816
#> P2(5,5) : 
#>           1          2          3          4          5          6
#> 1 0.1666667 0.00000000 0.00000000 0.00000000 0.00000000 0.00000000
#> 2 0.1666667 0.52127660 0.42424242 0.04918033 0.07142857 0.43076923
#> 3 0.1666667 0.12765957 0.03030303 0.19672131 0.21428571 0.07692308
#> 4 0.1666667 0.05319149 0.33333333 0.54098361 0.50000000 0.07692308
#> 5 0.1666667 0.02127660 0.03030303 0.11475410 0.21428571 0.01538462
#> 6 0.1666667 0.27659574 0.18181818 0.09836066 0.00000000 0.40000000

References

Ching, Wai-Ki, Ximin Huang, Michael K Ng, and Tak-Kuen Siu. 2013. “Higher-Order Markov Chains.” In Markov Chains, 141–76. Springer.
Ching, Wai-Ki, Michael K Ng, and Eric S Fung. 2008. “Higher-Order Multivariate Markov Chains and Their Applications.” Linear Algebra and Its Applications 428 (2): 492–507.
Ghalanos, Alexios, and Stefan Theussl. 2014. Rsolnp: General Non-Linear Optimization Using Augmented Lagrange Multiplier Method.
markovchain/inst/doc/an_introduction_to_markovchain_package.R0000644000176200001440000010163015137710440024362 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ## ----load, results='hide', message=FALSE-------------------------------------- library("markovchain") ## ----showClass, echo=FALSE---------------------------------------------------- showClass("markovchain") showClass("markovchainList") ## ----mcInitLong--------------------------------------------------------------- weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ## ----mcInitShort-------------------------------------------------------------- mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ## ----defaultMc---------------------------------------------------------------- defaultMc <- new("markovchain") ## ----intromcList-------------------------------------------------------------- mcList <- new("markovchainList", markovchains = 
list(mcWeather, defaultMc), name = "A list of Markov chains") ## ----operations--------------------------------------------------------------- initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ## ----operations2-------------------------------------------------------------- initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ## ----fval--------------------------------------------------------------------- fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ## ----otherMethods------------------------------------------------------------- states(mcWeather) names(mcWeather) dim(mcWeather) ## ----otherMethods2------------------------------------------------------------ name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ## ----sortMethod--------------------------------------------------------------- markovchain:::sort(mcWeather) ## ----transProb---------------------------------------------------------------- transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ## ----printAndShow------------------------------------------------------------- print(mcWeather) show(mcWeather) ## ----mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"--------- if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ## ----mcPlotdiagram, echo=FALSE, fig.cap="Weather example. 
Markov chain plot with diagram"---- if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ## ----exportImport1------------------------------------------------------------ mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ## ----exportImport2------------------------------------------------------------ if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ## ----exporImport3------------------------------------------------------------- if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, "markovchain") etm2mc } else { message("etm unavailable") } ## ----fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"---- library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ## ----exportImport4------------------------------------------------------------ myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ## 
----cchcMcList--------------------------------------------------------------- stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ## ----cchcMcList2-------------------------------------------------------------- mcCCRC[[1]] dim(mcCCRC) ## ----conditionalDistr--------------------------------------------------------- conditionalDistribution(mcWeather, "sunny") ## ----steadyStates------------------------------------------------------------- steadyStates(mcWeather) ## ----gamblerRuin-------------------------------------------------------------- gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) { m <- markovchain:::zeros(moneyMax + 1) m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1 states <- as.character(0:moneyMax) rownames(m) <- colnames(m) <- states for(i in 2:moneyMax){ m[i,i-1] <- 1 - prob m[i, i + 1] <- prob } new("markovchain", transitionMatrix = m, name = paste("Gambler ruin", moneyMax, "dim", sep = " ")) } mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5) steadyStates(mcGR4) ## ----absorbingStates---------------------------------------------------------- absorbingStates(mcGR4) absorbingStates(mcWeather) ## ----renaldoMatrix1----------------------------------------------------------- P <- markovchain:::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] 
<- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ## ----transientStates---------------------------------------------------------- transientStates(probMc) ## ----probMc2Canonic----------------------------------------------------------- probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ## ----isAccessible------------------------------------------------------------- is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ## ----periodicity-------------------------------------------------------------- E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ## ----mathematica9Mc----------------------------------------------------------- mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ## ----mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. 
Markov chain plot."---- plot(mathematicaMc, layout = layout.fruchterman.reingold) ## ----mathematica9MC, echo=FALSE----------------------------------------------- summary(mathematicaMc) ## ----fpTime1, eval=FALSE------------------------------------------------------ # .firstpassageKernel <- function(P, i, n){ # G <- P # H <- P[i,] # E <- 1 - diag(size(P)[2]) # for (m in 2:n) { # G <- P %*% (G * E) # H <- rbind(H, G[i,]) # } # return(H) # } ## ----fpTime2------------------------------------------------------------------ firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ## ----mfpt1-------------------------------------------------------------------- meanFirstPassageTime(mcWeather) ## ----mfpt2-------------------------------------------------------------------- meanFirstPassageTime(mcWeather,"rain") ## ----mfpt3-------------------------------------------------------------------- firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ## ----mrt-weather-------------------------------------------------------------- meanRecurrenceTime(mcWeather) ## ----mrt-probMc--------------------------------------------------------------- recurrentStates(probMc) meanRecurrenceTime(probMc) ## ----data-drunkard------------------------------------------------------------ drunkProbs <- markovchain:::zeros(5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ## ----rs-drunkard-------------------------------------------------------------- recurrentStates(drunkMc) ## ----ts-drunkard-------------------------------------------------------------- transientStates(drunkMc) ## ----ap-drunkard-------------------------------------------------------------- absorptionProbabilities(drunkMc) ## 
----at-drunkard-------------------------------------------------------------- meanAbsorptionTime(drunkMc) ## ----------------------------------------------------------------------------- committorAB(mcWeather,3,1) ## ----hitting-data------------------------------------------------------------- M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ## ----hitting-probabilities---------------------------------------------------- hittingProbabilities(hittingTest) ## ----hitting-weather---------------------------------------------------------- hittingProbabilities(mcWeather) ## ----simulatingAMarkovChain--------------------------------------------------- weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ## ----simulatingAListOfMarkovChain--------------------------------------------- patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ## ----fitMcbyMLE2-------------------------------------------------------------- weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ## ----fitMcbyLAPLACE----------------------------------------------------------- weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate ## ----fitSequenceMatrix-------------------------------------------------------- createSequenceMatrix(stringchar = weathersOfDays) ## ----fitSequenceMatrix2------------------------------------------------------- myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ## 
----fitMcbyBootStrap1-------------------------------------------------------- weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20) weatherFittedBOOT$estimate weatherFittedBOOT$standardError ## ----fitMcbyBootStrap2, eval=FALSE-------------------------------------------- # weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, # method = "bootstrap", nboot = 200, # parallel = TRUE) # weatherFittedBOOTParallel$estimate # weatherFittedBOOTParallel$standardError ## ----fitMcbyBootStrap3, eval=FALSE-------------------------------------------- # RcppParallel::setNumThreads(2) ## ----fitMcbyMLE1-------------------------------------------------------------- weatherFittedMLE$logLikelihood weatherFittedBOOT$logLikelihood ## ----confint------------------------------------------------------------------ weatherFittedMLE$confidenceInterval weatherFittedBOOT$confidenceInterval ## ----multinomial-------------------------------------------------------------- multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix, countsTransitionMatrix = createSequenceMatrix(weathersOfDays)) ## ----fitMclists--------------------------------------------------------------- data(holson) singleMc<-markovchainFit(data=holson[,2:12],name="holson") ## ----fitMclistsFit1, output.lines=20------------------------------------------ mcListFit<-markovchainListFit(data=holson[,2:6],name="holson") mcListFit$estimate ## ----fitMclistsFit2----------------------------------------------------------- c1<-c("a","b","a","a","c","c","a") c2<-c("b") c3<-c("c","a","a","c") c4<-c("b","a","b","a","a","c","b") c5<-c("a","a","c",NA) c6<-c("b","c","b","c","a") mylist<-list(c1,c2,c3,c4,c5,c6) mylistMc<-markovchainFit(data=mylist) mylistMc ## ----fitAMarkovChainListfromAlist, output.lines=15---------------------------- markovchainListFit(data=mylist) ## ----markovchainPredict------------------------------------------------------- 
predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3) ## ----markovchainListPredict--------------------------------------------------- predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5) ## ----markovchainListPredict2-------------------------------------------------- predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE) ## ----test1-------------------------------------------------------------------- sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") verifyMarkovProperty(sample_sequence) ## ----test2-------------------------------------------------------------------- data(rain) assessOrder(rain$rain) ## ----test3-------------------------------------------------------------------- assessStationarity(rain$rain, 10) ## ----divergence1-------------------------------------------------------------- sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ## ----divergence2-------------------------------------------------------------- data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ## ----rCtmcInit---------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ## 
----rctmcRandom0------------------------------------------------------------- statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ## ----ctmcRandom1-------------------------------------------------------------- statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2) ## ----rctmcSteadyStates-------------------------------------------------------- steadyStates(molecularCTMC) ## ----rctmcFitting------------------------------------------------------------- data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) ## ----mcWeatherQ--------------------------------------------------------------- mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen') mcWeatherQ ## ----mcWeatherHalfDay--------------------------------------------------------- mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5) mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix") mcWeatherHalfDay ## ----ctmcd1------------------------------------------------------------------- if(requireNamespace(package='ctmcd', quietly = TRUE)) { require(ctmcd) require(expm) #defines a function to transform a GM into a TM gm_to_markovchain<-function(object, t=1) { if(!(class(object) %in% c("gm","matrix","Matrix"))) stop("Error! 
Expecting either a matrix or a gm object") if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]]) #must add importClassesFrom("markovchain",markovchain) in the NAMESPACE #must add importFrom(expm, "expm") transitionMatrix<-expm(generator_matrix*t) out<-as(transitionMatrix,"markovchain") return(out) } #loading ctmcd dataset data(tm_abs) gm0=matrix(1,8,8) #initializing diag(gm0)=0 diag(gm0)=-rowSums(gm0) gm0[8,]=0 gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) #estimating GM mc_at_2=gm_to_markovchain(object=gmem, t=2) #converting to TM at time 2 } else { warning('package ctmcd unavailable') } ## ----pseudobayes-------------------------------------------------------------- pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ## ----pseudobayes2------------------------------------------------------------- trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, 
apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ## ----loadAndDoExample--------------------------------------------------------- weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ## ----MAPFit------------------------------------------------------------------- hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ## ----MAPFit2------------------------------------------------------------------ hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ## ----inferHyperparam---------------------------------------------------------- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ## ----inferHyperparam2--------------------------------------------------------- inferHyperparam(data = weathersOfDays[1:15]) ## ----inferHyperparam3--------------------------------------------------------- hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- 
hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ## ----MAPandMLE---------------------------------------------------------------- data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ## ----weatPred1---------------------------------------------------------------- mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ## ----weatPred2---------------------------------------------------------------- W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ## ----weatPred3---------------------------------------------------------------- W7 <- W0 * (mcWP ^ 7) W7 ## ----weatPred4---------------------------------------------------------------- q <- steadyStates(mcWP) q ## ----weatPred5---------------------------------------------------------------- R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ## ----Alofi1------------------------------------------------------------------- data("rain", package = "markovchain") table(rain$rain) ## ----Alofi2------------------------------------------------------------------- mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ## ----Alofi3------------------------------------------------------------------- steadyStates(mcAlofi) ## ----ratings1----------------------------------------------------------------- rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 
1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ## ----ratings2----------------------------------------------------------------- creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ## ----economicAnalysis1-------------------------------------------------------- statesNames <- c("customer", "non customer") P <- markovchain:::zeros(2); P[1, 1] <- .9; P[1, 2] <- .1; P[2, 2] <- .95; P[2, 1] <- .05; rownames(P) <- statesNames; colnames(P) <- statesNames mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company") M <- markovchain:::zeros(2); M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0 ## ----economicAnalysis2-------------------------------------------------------- c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,] c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,] ## ----economicAnalysis3-------------------------------------------------------- as.numeric((c(1, 0)* mcP ^ 5) %*% (as.vector(c(c1, c2)))) ## ----bonusMalus1-------------------------------------------------------------- getBonusMalusMarkovChain <- function(lambda) { bmMatr <- markovchain:::zeros(5) bmMatr[1, 1] <- dpois(x = 0, lambda) bmMatr[1, 3] <- dpois(x = 1, lambda) bmMatr[1, 5] <- 1 - ppois(q = 1, lambda) bmMatr[2, 1] <- dpois(x = 0, lambda) bmMatr[2, 4] <- dpois(x = 1, lambda) bmMatr[2, 5] <- 1 - ppois(q = 1, lambda) bmMatr[3, 2] <- dpois(x = 0, lambda) bmMatr[3, 5] <- 1 - dpois(x=0, lambda) bmMatr[4, 3] <- dpois(x = 0, lambda) bmMatr[4, 5] <- 1 - dpois(x = 0, lambda) bmMatr[5, 4] <- dpois(x = 0, lambda) bmMatr[5, 5] <- 1 - dpois(x = 0, lambda) stateNames <- as.character(1:5) out <- new("markovchain", transitionMatrix = bmMatr, states = stateNames, name = "BM Matrix") return(out) } ## 
----bonusMalus2-------------------------------------------------------------- bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc)) ## ----bonusMalus3-------------------------------------------------------------- sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25)) ## ----healthIns6--------------------------------------------------------------- ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo) ## ----healthIns7--------------------------------------------------------------- ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0) ## ----healthIns8--------------------------------------------------------------- possibleStates<-c("A","I","D") getMc4Age<-function(age) { transitionsAtAge<-ltcDemo[ltcDemo$age==age,] myTransMatr<-matrix(0, nrow=3,ncol = 3, dimnames = list(possibleStates, possibleStates)) myTransMatr[1,1]<-transitionsAtAge$pAA[1] myTransMatr[1,2]<-transitionsAtAge$pAI[1] myTransMatr[1,3]<-transitionsAtAge$pAD[1] myTransMatr[2,2]<-transitionsAtAge$pII[1] myTransMatr[2,3]<-transitionsAtAge$pID[1] myTransMatr[3,3]<-1 myMc<-new("markovchain", transitionMatrix = myTransMatr, states = possibleStates, name = paste("Age",age,"transition matrix")) return(myMc) } ## ----healthIns8-prob---------------------------------------------------------- getFullTransitionTable<-function(age){ ageSequence<-seq(from=age, to=120) k=1 myList=list() for ( i in ageSequence) { mc_age_i<-getMc4Age(age = i) myList[[k]]<-mc_age_i k=k+1 } myMarkovChainList<-new("markovchainList", markovchains = myList, name = paste("TransitionsSinceAge", age, sep = "")) return(myMarkovChainList) } transitionsSince100<-getFullTransitionTable(age=100) ## ----healthIns9--------------------------------------------------------------- rmarkovchain(n = 10, object = transitionsSince100, what = "matrix", t0 = "A", include.t0 = TRUE) ## 
----healthIns10-------------------------------------------------------------- transitionsSince80<-getFullTransitionTable(age=80) lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80, what="matrix",t0="A",include.t0=TRUE) temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories)) temp[lifeTrajectories=="I"]<-1 expected_period_disabled<-mean(rowSums((temp))) expected_period_disabled ## ----healthIns11-------------------------------------------------------------- mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1))))) ## ----blandenEtAlii------------------------------------------------------------ data("blanden") mobilityMc <- as(blanden, "markovchain") mobilityMc ## ----mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."---- plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2, layout = layout.fruchterman.reingold) ## ----blandenEtAlii3----------------------------------------------------------- round(steadyStates(mobilityMc), 2) ## ----preproglucacon1---------------------------------------------------------- data("preproglucacon", package = "markovchain") ## ----preproglucacon2---------------------------------------------------------- mcProtein <- markovchainFit(preproglucacon$preproglucacon, name = "Preproglucacon MC")$estimate mcProtein ## ----epid1-------------------------------------------------------------------- craigSendiMatr <- matrix(c(682, 33, 25, 154, 64, 47, 19, 19, 43), byrow = T, nrow = 3) hivStates <- c("0-49", "50-74", "75-UP") rownames(craigSendiMatr) <- hivStates colnames(craigSendiMatr) <- hivStates craigSendiTable <- as.table(craigSendiMatr) mcM6 <- as(craigSendiTable, "markovchain") mcM6@name <- "Zero-Six month CD4 cells transition" mcM6 ## ----epid2-------------------------------------------------------------------- eig <- eigen(mcM6@transitionMatrix) D <- diag(eig$values) ## 
----epid3-------------------------------------------------------------------- V <- eig$vectors V %*% D %*% solve(V) d <- D ^ (1/6) M <- V %*% d %*% solve(V) mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates) markovchain/inst/doc/higher_order_markov_chains.R0000644000176200001440000000513015137710470021772 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, comment = "#>") ## ----setup_2, include=FALSE, message=FALSE, echo=FALSE------------------------ require(markovchain) ## ----higherOrder-------------------------------------------------------------- if (requireNamespace("Rsolnp", quietly = TRUE)) { library(Rsolnp) data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) } ## ----hommcObject-------------------------------------------------------------- showClass("hommc") ## ----hommcCreate-------------------------------------------------------------- states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ## ----hommsales---------------------------------------------------------------- data(sales) head(sales) ## ----hommcFit, warning = FALSE, message = FALSE------------------------------- # fit 8th order multivariate markov chain if (requireNamespace("Rsolnp", quietly = TRUE)) { object <- fitHighOrderMultivarMC(sales, order = 
8, Norm = 2) } ## ----result, echo = FALSE----------------------------------------------------- if (requireNamespace("Rsolnp", quietly = TRUE)) { i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } } else { print("package Rsolnp unavailable") } markovchain/inst/doc/gsoc_2017_additions.Rmd0000644000176200001440000005244015137702633020420 0ustar liggesusers--- title: "Google Summer of Code 2017 Additions" author: "Vandit Jain" date: "August 2017" output: rmarkdown::html_vignette bibliography: markovchainBiblio.bib vignette: > %\VignetteIndexEntry{Google Summer of Code 2017 Additions} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} --- ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, comment = "#>") ``` ```{r setup_2, include=FALSE} require(markovchain) ``` # Expected Hitting Time using CTMC The package provides `ExpectedTime` function to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in I$, where $I$ is the set of all possible states and holding time $q_{i} > 0$ for every $i \neq j$. 
Assuming the conditions to be true, expected hitting time is equal to minimal non-negative solution vector $p$ to the system of linear equations [@NorrisBook]: \begin{equation} \begin{array}{lcr} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{k} = 1 & k \neq j \end{array} \label{eq:EHT} \end{equation} For example, consider the continuous time markovchain which is as follows: ``` {r,message = FALSE} library(markovchain) states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ``` The generator matrix of the ctmc is: \[ M = \left(\begin{array}{cccc} -1 & 1/2 & 1/2 & 0\\ 1/4 & -1/2 & 1/4 & 1/6\\ 1/6 & 0 & -1/3 & 1/6\\ 0 & 0 & 0 & 0 \end{array}\right) \] Now if we have to calculate expected hitting time the process will take to hit state $d$ if we start from $a$, we apply the $ExpectedTime$ function. $ExpectedTime$ function takes four inputs namely a $ctmc$ class object, initial state $i$, the final state $j$ that we have to calculate expected hitting time and a logical parameter whether to use RCpp implementation. By default, the function uses RCpp as it is faster and takes lesser time. ``` {r} ExpectedTime(ctmc,1,4) ``` We find that the expected hitting time for process to be hit state $d$ is 7 units in this case. # Calculating Probability at time T using ctmc The package provides a function `probabilityatT` to calculate probability of every state according to given `ctmc` object. The Kolmogorov's backward equation gives us a relation between transition matrix at any time t with the generator matrix[@dobrow2016introduction]: \begin{equation} P'(t) = QP(t) \end{equation} Here we use the solution of this differential equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. In this equation, $P(t)$ is the transition function at time t. 
The value $P(t)[i][j]$ at time $P(t)$ describes the conditional probability of the state at time $t$ to be equal to j if it was equal to i at time $t=0$. It takes care of the case when `ctmc` object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix $P(t)$. Also to mention is that the function is also implemented using RCpp and can be used used to lessen the time of computation. It is used by default. Next, We consider both examples where initial state is given and case where initial state is not given. In the first case, the function takes two inputs, first of them is an object of the S4 class 'ctmc' and second is the final time $t$. ``` {r} probabilityatT(ctmc,1) ``` Here we get an output in the form of a transition matrix. If we take the second case i.e. considering some initial input: ``` {r} probabilityatT(ctmc,1,1) ``` In this case we get the probabilities corresponding to every state. this also includes probability that the process hits the same state $a$ after time $t=1$. # Plotting generator matrix of continuous-time markovchains The package provides a `plot` function for plotting a generator matrix $Q$ in the form of a directed graph where every possible state is assigned a node. Edges connecting these nodes are weighted. Weight of the edge going from a state $i$ to state $j$ is equal to the value $Q_{ij}$. This gives a picture of the generator matrix. For example, we build a ctmc-class object to plot it. ``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` Now if we plot this function we get the following graph: ``` {r} plot(molecularCTMC) ``` The figure shown is built using the $igraph$ package. 
The package also provides options of plotting graph using $diagram$ and $DiagrameR$ package. Plot using these packages can be built using these commands: ``` {r} if(requireNamespace(package='ctmcd', quietly = TRUE)) { plot(molecularCTMC,package = "diagram") } else { print("diagram package unavailable") } ``` Similarly, one can easily replace $diagram$ package with $DiagrammeR$. # Imprecise Continuous-Time Markov chains Continuous-time Markov chains are mathematical models that are used to describe the state-evolution of dynamical systems under stochastic uncertainty. However, building models using continuous time markovchains take in consideration a number of assumptions which may not be realistic for the domain of application; in particular; the ability to provide exact numerical parameter assessments, and the applicability of time-homogeneity and the eponymous Markov property. Hence we take ICTMC into consideration. More technically, an ICTMC is a set of “precise” continuous-time finite-state stochastic processes, and rather than computing expected values of functions, we seek to compute lower expectations, which are tight lower bounds on the expectations that correspond to such a set of “precise” models. ## Types of ICTMCs For any non-empty bounded set of rate matrices $L$, and any non-empty set $M$ of probability mass functions on $X$, we define the following three sets of stochastic processes that are jointly consistent with $L$ and $M$: * $P^{W}_{L,M}$ is the consistent set of all well-behaved stochastic processes; * $P^{WM}_{L,M}$ is the consistent set of all well-behaved Markov chains; * $P^{WHM}_{L,M}$ is the consistent set of all well-behaved homogeneous Markov chains[@ictmcpaper]. From a practical point of view, after having specified a (precise) stochastic process, one is typically interested in the expected value of some function of interest, or the probability of some event. 
Similarly, in this work, our main objects of consideration will be the lower probabilities that correspond to the ICTMCs. ## Lower Transition Rate Operators for ICTMCs A map $Q_{l}$ from $L(X)$ to $L(X)$ is called a lower transition rate operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]: 1. $[Q_{l}m](x) = 0$ 2. $[Q_{l}I](x) \geq 0 \forall y \in X$ such that $x \neq y$ 3. $[Q_{l}(f + g)](x)\geq [Q_{l}f](x) + [Q_{l}g](x)$ 4. $[Q_{l}(l f)](x) = \lambda Q_{l}f[(x)]$ ## Lower Transition Operators A map $T_{l}$ from $L (X )$ to $L (X )$ is called a lower transition operator if, for all $f,g \in L(X)$, all $\lambda \in R_{\geq 0}$, all $\mu \in L(X)$, and all $x \in X$[@ictmcpaper]: 1. $[T_{l} f](x) \geq min(f(y) : y \in L)$ 2. $[T_{l}(f +g)](x) \geq [T_{l} f](x)+[T_{l}g](x)$ 3. $[T_{l}(\lambda f)](x) = l [T_{l} f](x)$ ## ImpreciseprobabilityatT function Now I would like to come onto the practical purpose of using ICTMC classes. ICTMC classes in these package are defined to represent a generator that is defined in such a way that every row of the generator corresponding to every state in the process is governed by a separate variable. As defined earlier, an imprecise continuous time markovchain is a set of many precise CTMCs. Hence this representation of set of precise CTMCs can be used to calulate transition probability at some time in future. This can be seen as an analogy with `probabilityatT` function. It is used to calculate the transition function at some later time t using generatoe matrix. For every generator matrix, we have a corresponding transition function. Similarly, for every Lower Transition rate operator of an ICTMC, we have a corresponding lower transition operator denoted by $L_{t}^{s}$. Here $t$ is the initial time and $s$ is the final time. 
Now we mention a proposition[@ictmcpaper] which states that: Let $Q_{l}$ be a lower transition rate operator, choose any time $t$ and $s$ both greater than 0 such that $t \leq s$, and let $L_{t}^{s}$ be the lower transition operator corresponding to $Q_{l}$. Then for any $f \in L(X)$ and $\epsilon \in R_{>0}$, if we choose any $n \in N$ such that: \[n \geq max((s-t)*||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)\] with $||f||_{v}$ := max $f$ - min $f$, we are guaranteed that[@ictmcpaper] \[ ||L_{t}^{s} - \prod_{i=1}^{n}(I + \Delta Q_{l}) || \leq \epsilon \] with $\Delta := \frac{s-t}{n}$ Simple put this equation tells us that, using $Q_{l}g$ for all $g \in L(X)$ then we can also approximate the quantity $L_{t}^{s}$ to arbitrary precision, for any given $f \in L(X)$. To explain this approximate calculation, I would take a detailed example of a process containing two states healthy and sick, hence $X = (healthy,sick)$. If we represent in form of an ICTMC, we get: \[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) \] for some $a,b \in R_{\geq 0}$. The parameter $a$ here is the rate at which a healthy person becomes sick. Technically, this means that if a person is healthy at time $t$, the probability that he or she will be sick at time $t +\Delta$, for small $\Delta$, is very close to $\Delta a$. More intuitively, if we take the time unit to be one week, it means that he or she will, on average, become sick after $\frac{1}{a}$ weeks. The parameter $b$ is the rate at which a sick person becomes healthy again, and has a similar interpretation. 
Now to completely represent the ICTMC we take an example and write the generator as: \[ Q = \left(\begin{array}{cc} -a & a \\ b & -b \end{array}\right) : a \in [\frac{1}{52},\frac{3}{52}],b \in [\frac{1}{2},2] \] Now suppose we know the initial state of the patient to be sick, hence this is represented in the form of a function by: \[ I_{s} = \left(\begin{matrix} 0 \\ 1 \end{matrix}\right) \] We observe that the $||I_{s}|| = 1$. Now to use the proposition mentioned above, we use the definition to calculate the lower transition operator $Q_{l}$ Next we calculate the norm of the lower transition rate operator and use it in the preposition. Also we take value of $\epsilon$ to be 0.001. Using the preposition we can come up to an algorithm for calculating the probability at any time $s$ given state at initial time $t$ and a ICTMC generator[@ictmcpaper]. The algorithm is as follows: **Input**: A lower transition rate operator $Q$, two time points $t,s$ such that $t \leq s$, a function $f \in L(X )$ and a maximum numerical error $\epsilon \in R_{>0}$. **Algorithm**: 1. $n = max((s-t)||Q||,\frac{1}{2\epsilon}(s-t)^{2}||Q||^{2}||f||_v)$ 2. $\Delta = \frac{s-t}{n}$ 3. $g_{0} = I_{s}$ 4. for $i \in (1,.....,n)$ do $g_{i} = g_{i-1} + \Delta Q_{l}g_{i-1}$ 5. end for 6. return $g_{n}$ **Output**: The conditional probability vector after time $t$ with error $\epsilon$. Hence, after applying the algorithm on above example we get the following result: $ g_{n} = 0.0083$ if final state is $healthy$ and $g_{n} = 0.141$ if the final state is $sick$. The probability calculated is with an error equal to $\epsilon$ i.e. $0.001$. Now we run the algorithm on the example through R code. 
``` {r} states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) ``` The probabilities we get are with an error of $10^{-3}$ # Continuous time markovchain generator using frequency Matrix The package provides `freq2Generator` function. It takes in a matrix representing relative frequency values along with time taken to provide a continuous time markovchain generator matrix. Here, frequency matrix is a 2-D matrix of dimensions equal to relative number of possible states describing the number of transitions from a state $i$ to $j$ in time $t$, which is another parameter to be provided to the function. The function also allows to chose among three methods for calculation of the generator matrix [@freqArticle]. It requires the [@pkg:ctmcd] package. Three methods are as follows: 1. Quasi Optimization - "QO" 2. Diagonal Adjustment - "DA" 3. Weighted Adjustment - "WA" See reference for details about the methods. Here is an example matrix on which `freq2Generator` function is run: ``` {r} if(requireNamespace(package='ctmcd', quietly = TRUE)) { sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) } else { print('ctmcd unavailable') } ``` # Committor of a markovchain Consider set of states A,B comprising of states from a markovchain with transition matrix P. The committor vector of a markovchain with respect to sets A and B gives the probability that the process will hit a state from set A before any state from set B. 
Committor vector u can be calculated by solving the following system of linear equations[@committorlink]: $$ \begin{array}{l} Lu(x) = 0, x \notin A \cup B \\ u(x) = 1, x \in A \\ u(x) = 0, x \in B \end{array} $$ where $L = P -I$. Now we apply the method to an example: ``` {r eval=FALSE} transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") committorAB(object,c(5),c(3)) ``` Here we get probability that the process will hit state "e" before state "c" given different initial states. # First Passage probability for set of states Currently computation of the first passage time for individual states has been implemented in the package. `firstPassageMultiple` function provides a method to get first passage probability for given provided set of states. Consider this example markovchain object: ``` {r} statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) ``` Now we apply `firstPassageMultiple` function to calculate first passage probabilities for set of states $"b", "c"$ when initial state is $"a"$. ``` {r} firstPassageMultiple(testmarkov,"a",c("b","c"),4) ``` This shows us the probability that the process will hit any of the state from the set after n number of steps for instance, as shown, the probability of the process to hit any of the states among $"b", "c"$ after $2$ steps is $0.6000$. # Joint PDF of number of visits to the various states of a markovchain The package provides a function `noofVisitsDist` that returns the PDF of the number of visits to the various states of the discrete time markovchain during the first N steps, given initial state of the process. 
We will take an example to see how to use the function on a `markovchain-class` object: ``` {r} transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") ``` The output clearly shows the probabilities related to various states of the process. # Expected Rewards for a markovchain The package provides a function `expectedRewards` that returns a vector of expected rewards for different initial states. The user provides reward values, a vector $r$ of size equal to the number of states, having a value corresponding to every state. Given a transition matrix $[P]$, we get the vector of expected rewards $v$ after $n$ transitions according to the equation as follows[@GallagerBook]: $v[n] = r + [P]*v[n-1]$ Applying this equation on a markovchain-class object ``` {r} transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) ``` ## Expected Rewards for a set of states in a markovchain process The package provides a function `expectedRewardsBeforeHittingA` that returns the value of expected first passage rewards $E$ given rewards corresponding to every state and an initial state. This means the function returns the expected reward for a given initial state $s_{0}$, number of transitions $n$ and for a set of states $A$, with a constraint such that the process does not hit any of the states that belong to set $A$. $S$ is the set of all possible states. The function uses an equation which is as follows: $$E = \sum_{i=1}^{n}{1_{s_{0}}P_{S-A}^{i}R_{S-A}}$$ here $1_{s_{0}} = [0,0,...0,1,0,...,0,0,0]$, with 1 at the $s_{0}$ position, and $R_{S-A}$ being the rewards vector for the states in $S-A$. # Checking Irreducibly of a CTMC The package provides a function `is.CTMCirreducible` that returns a Boolean value stating whether the ctmc object is irreducible. 
We know that a continuous time markovchain is irreducible if and only if its embedded chain is irreducible[@Sigman]. We demonstrate an example running the function: ``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) ``` # Simulation of Higher Order Multivariate Markovchains The package provides `predictHommc` function. This function provides a simulation system for higher order multivariate markovchains. The function assumes that the state probability distribution of the jth sequence at time $r+1$ depends on the state probability distribution of all the sequences at the n previous moments of time i.e. $t = r$ to $t = r-n+1$ . Hence the proposed model takes the form mathematically as:[@ching2008higher] $$ X_{r+1}^{j} = \sum_{k=1}^{s}\sum_{h=1}^n{\lambda_{jk}^{(h)}P_{h}^{(jk)}X_{r-h+1}^{(k)}}, \ \ \ j = 1,2,....s, \ \ r = n-1,n,... $$ with initials $X_{0}^{(k)},X_{1}^{(k)},......,X_{n-1}^{(k)} \ (k = 1,2,...s)$. Here, $\lambda_{jk}^{(h)}, \ 1 \leq j,k \leq s, \ 1 \leq h \leq n \ \ \ and \ \ \ \sum_{k=1}^{s}\sum_{h=1}^{n}{\lambda_{jk}^{(h)} = 1}, \ \ \ j = 1,2,....s.$ Now we run an example on a sample hommc object for simulating the next 3 steps using `predictHommc` function. The function provides a choice of entering initial states according to the hommc object. In case the user does not enter initial states, the function takes all initial states to be the first state from the set of states. 
``` {r} if (requireNamespace("Rsolnp", quietly = TRUE)) { statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) } else { print("Rsolnp unavailable") } ``` # Check Time Reversibility of Continuous-time markovchains A Continuous-time markovchain with generator $Q$ and stationary distribution $\pi$ is said to be time reversible if:[@dobrow2016introduction] $$ \pi_{i}q_{ij} = \pi_{j}q_{ji} $$ Intuitively, a continuous-time Markov chain is time reversible if the process in forward time is indistinguishable from the process in reversed time. A consequence is that for all states i and j, the long-term forward transition rate from i to j is equal to the long-term backward rate from j to i. The package provides `is.TimeReversible` function to check if a `ctmc` object is time-reversible. We follow with an example run on a `ctmc` object. 
``` {r} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) ``` # References markovchain/inst/doc/an_introduction_to_markovchain_package.pdf0000644000176200001440000135470715137710471024756 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 5571 /Filter /FlateDecode /N 89 /First 764 >> stream x\[F~?B/'W6>6cח]zZo?_VM7Feeef})Ifù(,AEfsWd`ԳE^Sx=;ea}a5[ -!/йDEWQԏI n!Yb #8!R  NYY o 78 l zV4/TYy3vZuJH4,cOnAW7_Pp﫺~AѴͼi32}[J<w1Ĩ̾"zg]5s8"Hpsċ;F;9HuulGm<ֵnnX>p-Y>V'B=@4;^u83JcI_8_S=M]eWdR]7d 6Ȣ_]%m軗 kfhw rԯ^kq,iٺ= y$>7uSm؆wI3*WM۵I}aG˹9<]Οݨꅜ?YY$h# QӵrՉ:ujerEaDG!1Zt%:lݏE:\_:өJEgT:tL.ѩ|~LMLI1HiS{xt$Mi EHSTHTHTH4Hi)DbHK!EZ X-R'z6H :uedSmM]h 'hcmL{#'0z4iϪ}ӪbkSI&/IY$qxZzTtt!?a2.eՑHԇ,ϗe[<ӳ ye3BƃfX/ӓ{UNm #߫t=.SM ~|?^BّawELVqGb4`V;~|V+aEpN`iПA%U, D ; 9Vnѡj(ƼVrF ݗ&QKG2v@"cwؗ.=b);cs+9A3jdCV`5 ^W.Ƭf/+6bl֌+vf&em-Xw5*ֽiؔfo[Y- _F8Ji^Q~íojP?7W/Ӻm!A ո󬫮IaE\scb.mTk޽;g?F嫪\hȕ=* UߩUvV</ϖt_Pr0A7:4^M-<zLٷ߭f]=G UtaL\˅]}ϵz'DQ˵ A^&!~Z q/D`;(#'S pY9IAEY1poL3C]u|z~n cUݦpMzݻ -%6P.b{=B_~_ag}o#?`UܪԑM%uriPJm? Y ? y . nJƷI0>ce݊`XvްОp\vW^U+@0ЊUۺxK͂-OhgG{vcV+c6O ~+S,^;*h ;DC X 8uPz"4v~Nב*C?2]Y bM†5@4N=lb;9|̓9eCmz}q|"rT}YlE}9ǰ#wj U1(d.D@kt񧟱x":/$Vq<CIqv[X Pȹ"}DO^ .D?C>C=Cl)+#g).]uȥaʂ4eRd#M>eNK<'WٓQ gpl.26w|l2a+ڀǻU,.ނO`CGamB\Q*kK \hjz8|ϸm&=ߌ6#CPiEI+A'b+UE'늜"eۜ ad%RpwMNn'OI ?=ϧɠnwewv()F=`ԧǫծOV Wy6y2<; a =6ϽGܙ8wzot!-ڝYo IYx}Hb[]uMw&je]MagQ5b Q\kÒR>Z ASe_>ӑkrBy>.o޼ɯ|Uo6S/q[[r㶛LC[vӼ&7M L^\\3lŰ{)C|rzІg+f VYz۾DNθ7Wy\4*WyCw9bύ~oϛU_6 3;wLN)@;CҦԾSO"yM@2pܱŇeە]\t9ćV;@$89p|0Q(T7ӶE])j*fxqa-hs77Z¸ܻ}/fI.>8>D6?;4tr𪼬q=.Q \=wD Lľ92z_uD&Voo`"[y{+@pn(a"WyXNf ̘9wF>У!$*i{- }Yp~=7>7?:><==ٜ6NBp F̐kuLڇoaS=-g\K[D! 
]x(0ARЎ <g‹P7"==VB},P$Q58=HzB|LAu*/+ϡl\i٣qW/IM9:mQvn+R4]cO,^5Fd 7L Ȩ Hv_ޮqw\$`Tm<Hh=$G.m`ۚIZXl ;?va&rݧ8?`-$Ϲ)B))y S{~3ذ8neMmBBBhۣ`҆@1Ma=T,ӆ υ8` ) B~{1v&C`x XP9M5ws;@֋<|276bz˴mwe,l& ǵ\;4.Qy~8Kt0EN/K?.\dpUf Ýڅ2,&Z/ipU/_XFU2TDq^(BM6XuIXy;It49}}kZQ mX3yuf:"'Ka6}Vw•exXͯK(u$GRXS0O)Gcu%cEH|@UcAqW{nzHpjyNT3;v騫u]>G!}0WUjSpd4Iy  Ճz|qsy};Wۓr<- GJ=7]igVzܫ/(6wm M'?H 1gGB}d_7ӗk4-L]> stream GPL Ghostscript 10.06.0 discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions 2026-02-01T18:45:29+01:00 2026-02-01T18:45:29+01:00 2026-02-01T18:45:29+01:00 LaTeX with hyperref The markovchain Package: A Package for Easily Handling Discrete Markov Chains in RG.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. Cordón endstream endobj 92 0 obj << /Type /ObjStm /Length 3504 /Filter /FlateDecode /N 88 /First 813 >> stream x[[s~c2:dI3ަeh&^if eo)s#4,3d,X9fa3 IG@g6xt!SҁaJk)S^]JVtg5xLJۥb;KjB01 4“c%xf)р#+x('8J1ڑlt^uYhoI% F8buGĠ'&!kBXxD)6QO{gݑyvp#%x/e:vǛ;g_vxͼdwhl.~;ZpqǛ}~]] ON)G|q. w.!1'J;rlWlZqnQ ]Z D'cJp/'}cR8ѓ@(`(^j'ޓӕjdZ"@)1p")k)ItuY% f4rR#:W`sZs|(f~ClAQD_Q-[3jidj5c$55cW1T8Jd -mZ~HQDv5hX.Q&pebU!SǨRdX"d_})J0 runR(1Y ֲPPC\z\{ w5.D,piLLV2DWK8T1TX"VhS&lơGuF͒aէ\(z3WUb>ֶf"6\"AtuA U$iE$)/pcUõ{. B;͸\X k*r<艟Ib+37'E8aO.Fq,'qFH| f]K?&uE``KgZv k/d9NJ0ȉsL&IZ4"'gN6f٭2dU| 6Erf-nܩfJ>Gc $A9ee? 
ϒm6'|$InseneO )$%g$L2ekqSiUj4qO_dƜm.ۯ1*pl/QfyL iQ^?Kʶcp.\lwYԪS1Y͸d,R- ]u7c ɣ[4Tk:dx0S _/*Snte:[%'2JT$~"+(GQsyF@x"fehP;b"=W{I_~]ڪ繆Q#cxZLh`ID/5.ZYuQ*6ypJYUπltY%V>3e VpEqonf/̌$UޯQkuF5BeYgJYvX S)z5^idrPy}݌))}?>b)Yz@O:_ʙu3*`@WSs-(1:چ咞+BYm"SФŕ8"+,ǀ!Oy ~"6I7$Q*9;=H?*&U$f$E;,nhۏr 3kAs}]Z˲Zõi(vƁ)j&JBK+=)86D-͠3U$unl!a+*c -U5X%f̷Y8 j-۠FBܢjfUɜNM~iӮщaj^l>79̓wNAp)yh+hͬp'!aa2WAMCS5体qAnM^9ޞzݸ~f7Y"Sj] iglˉ1:pⴣ 5N^{/WA^o6zOa{:WoVzAPq nǸvQKq:fR_Ci99>DÚ7M쓋O#Jݬh8p`b"T7f&uAW75'n{8 ;9͡L E"/iK悍b3h^e~..uq^h99J(P6%X)vKB| NMIE!́3 ̲whI]XB n|p ZT׳j44ЫQ DINY{p jvl186J~0rM/*60{KД < 808s&%8h4$WT[ TVB́,wrAqNnc|a+z_q/{]24Lx|&q5@܅G`N9SO 0/>_ܣ1*&\Jp ^.[jPO nX І ?9!#xxbނcfŷ-6Ks ue|9=MKڻ-O$Sh&pEP-R/a (9g{+v##vO}v9txm>\o5Oq%ʩ:އ#_襸ww}<]k3Л +c[D;Z]zzy ч <tݻ/Vկ|=~!>tw8`X+n~GWA[cd&SL=NW&i~endstream endobj 181 0 obj << /Type /ObjStm /Length 4606 /Filter /FlateDecode /N 89 /First 843 >> stream x\ks6~~>3;!;03wg&6ə~eVW[g>/@REr,"R /;^Dxr&bR9|k}[roLI- qr&s Ї_$V \( ͔vdL˴-Ǵg&wt1"לk.U~%sŜICƝ\g,59朰yܣ S4_) 褐CMySf[j2f!W JQgAӼ7XiB H3uU4gN4(+DDm#;[ظL # o}ja 1@-ƐR+ƐVһC:- D1TN4PCEbҺ* IZFe,1ԥ*Tc >13aH\)%)t\7PFh>c`D\)F 4]yFc cXAm0U&& kAc]1lճ rQ@b-]%-1 `^|^j[߲VMmoW;3u9(ƥ*-n1.b9^ R7KH\-nr+IbrZ5z?p.K&+IzRR+5eMaYWL+ɩzr^1USuO?]tݟ6Ē'u.L{ר.^^SbR+SvQLWeӶ!IbUן7oi5fZ,|8Q%ZV"j׋e47Edޜ#>.~Wj!L@Fϙ:}G+-b5鯤Fɼ,Z8G.Dwg){C2m:AX|Nm`M -mM}C=M@F][01$`bx @a FP 3H4Rj#%^S_0M?ܽ Z91_o=O)?O /x1*U'٬ ^^K~,K~+o i1;;/\_rY-uKꊯz-`2 yմ\1Y'±U)ϩU5-U$r1/f~y:xV+K`&!u9\.wD0;yw^`ŬAt}`Lxj{K ~"F i~rlep{Ëp1qZNnf򖗿StX%̯7p`eY r1c?k}l'2dd|({1ᨗN>zlՀܫb?_}>xCt}gh=>]t礭 zvIR F+z=EGCm;"}+D1"WU~4Ph|r\*?ύ&:E_Gznswt24a.~/*`yߤ_xzӯt5,=/=C:􅇬b4rjR7|H XWybs1jV2 ".>or;9:1DÑ@.ۮV{xXb`[CxJ!ZQ1bi\ݡOJ6ԏ>rSSh`l],2b RQ~)׭T:ELJ,9/D_Ta,bVLxh~UVg'v%ȝ//Kk,yR~둨Q {em'n/v0?W?;,QzL]Nwaq~F7RHkc})o*Q'jdf7uu=ڳH/#rqXR3^\߫jVº\Ӯ;n-AډCELndɋ7__ނ+{D&;#ІQ_G7~_l`Mpf͆y3C2Ѡ [|F9d.h<VPNDK5h}H}9m)J˳rsw\Ϫ#cM5;[w֝mͳWe1=Vk{GcGڛN:l&o4067; d o# 9~@[V50R#mԱ]Id-=\։9nDs(Y./ p:S#RHj,{8 bJ2*.&)9ʏ'&=~`FulN~X4Ba.q-¸Ggr ADgjz! * ;SZ*; JhB@Ӂx ì `@X4[oQc0*.;L>=@ Tuu`X ;݀#(c%9E;@ ,3!<A'g,,g<Qoɔz| . 
oR>!j!2f-G,UcEl#CX j#(p܆)GW26p5B.鷌%Tp.فlc(nhgֵ V2q+`:A`! UBf!: +j!vgTKCV?slٸbÁa&TlO'\gbVRƣ>[ %<= 92,l' f+nٳwݒ鳷jt6?l2Te~_LjIJ֯OləlG|v/9="jLٿE>Blo<${KȉbHҤjFQRZe&Ui#]^ǶrQ>I,UԔ;$,ۓ5:a]x&tRh:-5 jS.Rx>F.GgNzĉ.\|s},-A)礉2talq&1nR{n9/c_&Stƺ9;'Gg~(ZVjM_ru~wȷh &nB%i[!zP]ñ)h5A56 lZQǚ",J&!%FKle$ 8t^( Oh_W/EuqQ—&h YDlfNdsSE !ag01۪:F*f<3*LѦ"u+lű^\:2uV5u3-k}ƷJUlD3*Mea%lVm^5)ۮReRT]z[ʌg2YfHg9 A&> stream x[mo_-p9|'p8 ˡI.ri|P썣Ɩ K}vZuRƾpI>>3:hFwZa ^xq "ڄk4iJO#o ުZ[ LZ(vc L\+ &,  c<1d羒&h光CB7&WªE4u+-lШFYHsDAN[@P7opp;I8K虔p3Dq24إ;Euq .kEIxJx5aJ07vЊeE0AqQW򘌎"KUyQ.&c\B U*,JJPzףKD?GW˫;{5 Ʀe+Xp)-[@,Zf۟} 1?/F&[`߷i,w'YLYLT/0/ۤ%&-=ܰ-_?ŗ⦼O/|T eEʍo0n1sjz IӣEy[) T?[+{~0zJL bv$/ZĞS~KfO8톧24WK"ZSV>G)ָ !X^Cg|Ka\X`eZ#qIxQ)~/F⼸(Si\\a=Wu1v5+(;v8<U [HKQ͍]]g럏ft2mo9aMRMDXK0||P0q”/|P;Xc@ *@}Q}lR{Bꔎ3VsO$u0FKp<ǚr|˔ul>p.aޭE-xMZ臮Ye|/eTIX@P؄$uq  J_(5,phV#q2Dz<1E[Oi5'G[n^'endstream endobj 361 0 obj << /Type /ObjStm /Length 2484 /Filter /FlateDecode /N 89 /First 816 >> stream x[ko_-|MIl{۴3u@Rl}Q#9=FQ9#y})|ʦV/mPQ%&UGm(K2Jdo\o0ilI-k9g8&)3VMY r`L6б.1fD^ _e εrdvĂ_\K7^I$'(ࠤpQ` VNI*x.Z 0})*D,*xr`Ǫh,7NE XYE":"F'?EfT,+5[Tr ̎UI2)p*ER:bESjEݨsV o#wWͻ3 %hG[ aS|d1M>M/ 7ΰ@IcggjSyI?6xS9mіEMm]mRteY ?]kZ ԓ#L]TEjk+[d,5tXYJxobY~@1}f(:bŝjk`mޢtCTi>Y7D4(lݏްݐy9DװInȷ";Յ,vI*vry<uΧfZi3.53sՠ ԌxΨk~/ȂK~Mjo~I;AA~T5sZwUd .n} ónJ"'g3sMxBO!ђb!6ý͢hZa\ŕ$CPK-| \MpN&FtIO-(6 uFƣ}V* ?6D*6vPY$6; 4'C]v 2 Vh*hϨ$͘'t8 ڱe`F⺅ יŀ}Xy*hp:-ŢmO?tv6Ҳ {yb![]wٰa5Ä%Y9٣88u9B07yە_92u66vSWV -ޮ[ߒ+hVC\aX#t=جmЁϰ8}(av`qGL*aA^eg\E*cĬvHʴXŗV:hnDx:C(~򶒚Dd R_e *<2 P6cۣaznTRpwea]꽜qsֱ<Κ=7o~_:|7էqrn4R_F'hl4ͿQl$^NGi{Ѥ] fMkfxlu%a TtD{%7=~|ތ/ wɸ3NJ7]g+Y1̺'{ltx\;qu/uoFi!7͇։^c~ڿ-ߛe)r^ޣ}^F'u{j ܦZ]qHhO8Ckd3ԟX並9ÆsYw)YV> stream x[[o\~c\  'u"n h~P,T i8>ߜ^x/\07ù]pQQe Wb&".炶muDSp$@Q0ICXuNc>ٱ*S̩:Ί%sp\29 2\D,Nb>:IsrRkbh1sd   %7osƅ cAjFmT F NcJN`&4[g|jt &RKl̬%.`\J2N.i 5De㍆rllP\|B@) ڕȕHF` t%g]lPlTr!DHV&Ffȅ2Z@2d1#6O8 l # Nx2P.x*O j ďȾBdLTĄbG<&H$hJ<[M`Ka6@P5r)̎N[j-VZA0|fΧ skKkqԈ#ZC"8MF#(125xnyӈ\#Mb4uYCGo6-X%Ѝր,*8åMˈ F_||5z?{}qo~nx;`ɛ1L^fx;f,b_?rwt6zw'7ח^݌m׷W~7y!|2O::#̦6'wH;xǩqO3 
Y-8A/_Tׄ/TR)F4N;C?uUVj-vnN</ߍۢa6jqs?]y[/xxsc}_A ٥@͠';[$ x~ SC+-Mbˢ>Qrd9g)yxὟA)b@o gn°oeVI٧|@˙<#cb`EON-[1Uo9(^z@Kgam/RxS*a@3 $X% R3p MF炴)YmM::EV):%a>K̓_YbzAVx{[6?n=oΡQ$e1J'3[rʯcP%zqA_)]"CH8q镚zH~t_ǝ͡Wv4ŖSD U91vl$[a.%nWOI61?fħR׺I`!hR KANO 4"." `)*8g%p !&jȖFA49_;[M<k)4ăc5 f9_p e6jb GadOj9||2#Gƺ`#-"^vMZ/ CVWnHpEI|R]ȧťq\״\g14h2d;VaPЪ(4.'ج1hvxp[Ni0u1x\Ѵp`- p!XVP9m{Z3k1g@CcǓ"uWD0sRNLo  n O 9xAckb;F62/asRFiltGQؽpx􅣠 B=^ {IF^eU8;xcV&`m`pևЄ 6U`rFKX>ٮpЉ H S 1utdnbkvB*.6fX;uiejvx-'HEW~e@W~ @GovLo2g endstream endobj 541 0 obj << /Filter /FlateDecode /Length 4485 >> stream x[msοjJ;7xΑl'Nr$VTݐ$'f9pu7실Hrifh4~Y]Yo6~n6?\]YOM5ۋ8G΂|%+ejs񋸺_U][i溮qJZV!4^l7S-Fܷݖ扟cPZY%uUഅV&%YL \kw05 NWߵj+H SW֩0[ %vwij/:ظٯAJJ\@J I,6J1H yBREZV(N_ƘH)Cn*¨ q#?tĎ^saeojIr,>30e"X#n~G؏WsFJSoWO$Fs{VuNmbC+^1=[2M%O<:yA}AD.i`p=ԡn ݟq Zj__TwW% f`?\x0Ek_)5\X3[:jBe9 `d@^?iJH*"'8!B6>(4/4{#lP lR PM6K *2>L롍Sq(zvwYJb 7@4- kVmINIYJQJ%A v*L;ւ~%yPy/Kke\ᦤGVRE{b]Kkf Rb n{"Ol g\im@݃ę&Ͷ/u!y,gEmV*uI@kL@5E#}XSnc؂V*ힲ|*fP_..nFblvr 4܉?D!1{8BPJ B?E3 QnɾKϠΧcv ®%,lkk&HFObЉ*ڑtDl*eTFʈ@U.b@Wvl <0FG1bnepS9!%OabT9s*E`NvJ/*Ex1d"6ne}2~=.H ($|KIۛ2nR. (OG mT? 40jCʦin5Wzݤ!&)v&~ob-c+Hdd #G ڬoȵA#w>\$}A&28548RH_iˑEGr]@)jP"PMp$UeTΩ&mXuڴcJ S5{ 6l0v<25ޔj}DqkZbn,a!i¡%l!vh!%/.\',^j}1ـ KJ Sq@q: NJ?t7wcu\u"ք@QWF0 'Siu\QpAPIrlH62bm ;*k ]>t. d5N1!Q^ГM##`ܝ@ ܦvu Pg1B1Rxnⵎmt_O;l^Npx15WYiQtqZܔ lrc/8x>PںB-jQsOFٙB˰^f?uilˊ,C9uI9Wi'q$7E/IGpjxL$._:RQ9P6]F)ABYF,Y[JpLYSN0F#!j7qkN0i`~׉&MRQ{/}ޫt@RH,*!lJ鮉 ŰR\^-H|V:Qmە:*JcWp a[l~Sz([*9$69]fSf3s,Yʰ~L3L3!a9+G"Q_,aT,S(E2? >ۢ' e,7("d(y54jJrW)/'Jv?wqr3ʹMA~L0A1Ƣ, e T1FO < |mq @m)eFXqY[%N%)#gv՗0. 2 mu Yck19  ۛѡV_ "K3<~`0$ }V<|,_}!ޓ7F/Ý '3Im&D ֗ZK|y׻\d鳕,u;_T&aWfI6ͲPoL]A왩`Ɔ?v -UZ3~Uɸ,x=GV޴zAxdY`e

QǴ;(RFu3i V~y$!ti';m?M(S\_ {CSye%NP*%kP`cioeM "qDk^fq˴4`=Lvt,e'*"B;Ъ#ScSlGtæua aGG 8rq5 MH,c˞PU$vAX};{[Z Z3Kz(/O"R`㽁_BEdK95w{7n!wh2tk;rz_ 5a@4+Gͤin{8rBtP>`\#Gg&R7ZׁyP+"Mb +WLEIC \H7rQ!mtwEiƦ"Cy7ꤍxx>Fì I/3:)-pA,߂K*!Mn7b4perLuat8 $ Re-|௓['ʭo0JDg3Z#SnJ/:#yu*1R(@BXxϣ]\Wry&QG%xRi}Xwa=,8ݝbV3}a v+xW|pYf+(_A[R[oWt ձý."kP:/9 |DAc:n4m "vc>y?JiO귎Hq<ț56M1:&9M!X1x4gw擬rRjzYBb5qy8Nh3^E_NçQN7r %Z؆3)p\LTLͩcNPCM F4;Ͻ*HyP'> stream x\7r`4ߤ A+ 7igf=;+YקHvlc&bկ,eQ7>ѯ|1ѿeZ969:}IJQuBMNG ?>yw798:mcqͪxu9sqvJֹf6_ws_G=wig?)<ҝ6R"Hzϩj۬/ӻ !ygf d۱Cfck=USezq?mx<0k2P3d:ra3Ev[ɚ03'`vj^`339Oٳ{wXz1} "g$bʻӻ:sIZ( I휉}2C]/G^OdhN 4БOo@##2߁IKk׷yȘQM 3x438ĉ͗*&-\kn/0j3%ʧE VF{1/vuY]E.Z">ߠEVKeuaR絲Ʋ|n\oEkXOe[>zY ~G{L+͇xس$NH`QU\jlnf6$ H%%\%kTsvI&>Zxy.cwWXiI0MQ) ǍnU'آ9?WXu!tH TZ5rQ>5ao6TӢ*7̫Q>W9"в䭏ɒWWt!7:B!crZ "b{Rq( & sSW~_\'`J@zhH2(du` =w^;.N-AbfV4yWqٚ}Ҭ*ÒE#f"Ne KZb%7"ׅD=8@S"DkZ󯤌O^b- 6lY'|rb`U~XkLr0p8ٔX`#1rwgAs``io&ϲGl8&kޮz/ fmpZ4p;>oO^Bg[mt:isgϟ5dS 2&SrUְyK Cя+ 94Dp=<@7W;PuJȄ4QtC"k@X?oJ eqCfcPܐmq|kM* fVUڇ9sN<T*A|R4UPfC[]BDVDnz/]Meqb(h[\#B]Q @yY~='X\k 4:!#6i BŠIס<(\Pnۂn)&"?ؿ7V `'邧FYɒט[&vXN",Ef}="[ȪfځOw/j'@7CL8i/G|Io71[`w%&ۄ_,_ s T k QX*_y0 n LM!`~ifq)=~u .&/H%pRo-%QΔ[:ܚ˧SǑ1J1'|b$  K,W]^a| AwD=~da hWiLPLFB1ttFXF*mέB<1ȓX^1D)r[!NH73:wf[43[@#.zh}]pAcLs%#QCRjrN"X4/{! ;$EM5Tޛ< _bn:ٚb>S"o!/fayEg%EQ9WoPe@8ם*ژ<5~RK{r 7^bߍX("-(FAV:5J~Rv@E}ue; р[=۷A|L<ǪEvÝxtA#r9M^/3/ЃG)_Ʃb~ scg~OFOSYJh `~?q9˳[kL7߬MK Ժ$Ϸ`|hcJ/ Iz߮rYCayW9*D8]D[ e8d[RD]^Jϕ 4=&s#6H6Hv2<`_uZ&FByTҎE HZ`B6~-KZ bڕ{EQZ@n>w(N%@rSg*o2|@ppN{ 0SJ R U˕`Ѳ.6ݡzkf{38Jr  #;݃1f_jOrK%]r&u x齍=RyĢL_~p*VH$9u񾴫evqlڢMd [~l} 6I{i)@C$x0 \<[^dx ;=ڬgu'mS2Ɩe V| .}gHv #ő,yDt1ũ"g_-fPW{x\vwXc6QSْen噖kiMrt:m[2W 6AwCq8?&)]I}*T"}1+AHʥ_[sZc,ה*T5Po5E`@#K_O *9CF'Z۬O ^XrW pٔطO_ DO/JZ %l>LxOp0CbΕozkZ=(!g+|.4(Q>xW?>_(iS< *[€w1c|<Š Րc\(c*&eUNA8x/i ~R v@RYf<ٱɭ~zU, ':TT*LH:6g@?~tLA ٶF<g@?~tL* 9#S葪 eZ =Ho Vrqendstream endobj 543 0 obj << /Filter /FlateDecode /Length 2215 >> stream xYKs7%T{0{HU䒲S^=@$-& -Y+iǔ_YB,9/o,]_? 
U͗EX#X: rs\0|Ze)8nJ¢yOWٖ)݊('gU}F?oM9V Lι/{jxVWkdd?ZF+fRd~<`1Re]pmgKXi؜(AK1Z7t8g-(0˵2Eim0*Vk9 C -4'f]ZQ)| vFc8׎}#h EG,5A,KQXe!RI.$㒽kUWn㩥Q`<OFH013ݡ]\ABJuEy&Q3+KQ8bO^q "]MJA*8-+L ;4n(b VMw)ƲǺ * E"E?#/l)Sdo4:}W}UIGX9%XxC,,sX@,A(Htsoڪ}"[tϫլh3M-}d81k;k 'l]UؽGGq/4鸟4Z#UC36eܯ hQ.ǠX>w`8%3Ôxt%`Lr^z4Ri*j %5%@Z 92})P]mR*C *UJSa Q_g?TD)<j{ud݇%z7yL+L ^:3jpf|`?OeSPڭ✝۟my:nNY%Hwtyl(R )HVnҎ/3}sA?DPI vRi9em.BWДD6SIhV8")Q}N8'T@@ź!C Vj 2ͱy)y-stԢ$Q訄U+T }ۄ$aerjˌ◿ZGwa_,ߣ!p502MSNukZ"bu7W#pږVByl.n 9pl@TJ3 WR KQ tkf>THgRUL8$r za]1;KNQMqw޸c b G1hQ yD71Y] ?W]SP[ћLB{l!"LV頧(ҡYqbZ{MEeBvf&zk}% ~Nai܏}G Idzd19BH'Nwِx!8 ^4^b䑭 (l)+{[At0ȫ9gr_$sd,_ܴygwX$!L@z^Wg X 1k\CSlySieYWc݌rHuS`:Ցu|Fwո[vEBK@398O`=. X"$J{e03ۅĢ,8`c@F @F@3aYM5!V|zzRzZ(@@nck|x:x3"BmzQt ܦJ-ЋqUP ^ xKuѴaˋ li >+n=x1_O$[0-^> stream x]n@D{}@7k5v"A(d0ErXNb[{oq}zy~/ncڭ;_im׏ulݩ^nMvO>Ƿal ?Kv߇COϏޗal0ͱ|6Oܿ8?ݏ<b[u16Ƈ]u=O}u1A11Ku1ET#SbaF!"1BA0F#a! B#Dc0P]E)<Ь {$***$***$***ޢ{A9XV` lZr $ x ||`AH!ApءT]'E@/< hુ_rp.FcrNYfK ri`R.-LʥSI`J0)L &҂)\Zk)ڹƏum͋Ӿ]ow뢯:/endstream endobj 545 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5472 >> stream xX T׶ fZ *8a4Qc爢l7" 80}hYFDVDqHDhD1FO%+ϗ*`5Uu})cчd-^ ꘙ!ҩLG,_IT,f_=p@F.yo%&"?`ӫƌ88@)8<*28I`NGѓNaN>!~N~N7rZ5geKVxzz063|֖s"<"n}.Jif?Oe^A˃CBǿ:akOƠc 3Y eBg1EޙѪ] oh `Bŀೀ\tK䝂ud11 cJ.̘uj^@ąIףhjJ"YiFșlQʎ(~*%7 Y`p9[5pqXYj4'CfOm/>ObOoM ;(ÐIl6*ԑ8x ]SzJRA* }wy X?/h|>2+VptKkF(_Pm4:DKjzCBaf2 ukD%&9tkQX w޲5;l ؟Nf4I)>/[k| x48 xJBiњt}$8-XD<Ϋޙ6#O,_ OÞT]f! 
8@D;[3kaf#@KO]B6@ =@,mguY&~"Qٚ$M/AM<Êf6a^dqvMn1!ǘoE9W{^ LKqmOOMP!feޓ1ngolfbFr^$O'K eUsUB.8̤ pȧ1?&Źۥ* #+%K3Z[j&ZʎqBUEP f~De$m}LM]mycYR|[e]n)Z7~ GV2~J5=*rmA&\F% .'S=-lIYIYghFڶh"B֮Kwjw'a.mqā@$:YV2yoJS$شdC{X'd$E gwJ^bqqKJ8݆`9 qeL+FY \ת֙k Y9ZLjc ke.ZJYA<$4R-Fer hv)` 괴]i.owaV9ԿdЏp?\6bԐgzI]i^@V#I]GIx$梎 f ՝O`ؚ2M4~v -ȌLC:TptJ(ۛu}% Sazp_Qt]"D _ZԄӚL@ӣ(dN`4lS֫z2x=Gp 9r ç͇N^( \&q_WdL3 ;Ԥf> stream x]N@D{ Kh(@>#8I IK/ܭ7t7/+za-_ڗX>ڶ0vKxW~>5~^P/k740*/kjgi;TRM'p%f wY%=K@'e xGg Yd DM,$D$DZDS9&rMarZ9l\VN+OY&"Nup9`\<1\c`|Wguye>d 8K8f 86t % & % & % & % & % & ?!a(g< v_ֵg}x.4f9-;endstream endobj 547 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 4489 >> stream xXiTTW%VݫTp (Fmq8( @PQE@] "3"S uDcbDŽD^S:k{?: jqoqo>2fF& Y+jk`7W-GH4j4 ߯<3J9 !d(c&yD΋N-7LD{I7EDFF/lx0~|T}PHXQׄد]k׊+W;O^0Hoܾ% AC6^e۔N9a2+7,ƁYŌcV3kZfx3Df=3g0 Of>Lg2˘0ƊጒaƂ@ac2ۙ/dd2[a12,<VK ,ZJ+_ aQvʸ9~NoiN#2tB=a%fˆ҆/-*?\[`|/yWHSvcB˧32!M_(a4 FidU ME{%t7 ȝ}`sBK˖5}>W<{u,V2 ͦ芝͍F͗ɰs ;třXlWieypR൵]gq b /bH!L; aLBDoIDt^R@w NqDžyޔYd2q"/#YӪ1yÁ*c)gf4m㭞B-`ц,YPj3"6j\.qj6iZ.krXao_u^P]!v^I+eЪ,=ҫٙ><:'x/f0۴QSud-%BdG3`TP*l;.:@rq^4N'j3f i 6]d6p3ت:m:Pڣ"1kD% \}yE+CA%B68' P-Hc*⊣j:0T#eu2$ 9g/bwsGJ‘}p_6Q$/ 8OC?%x-ᤢjTY%8ar6M'Oh4AUCk1]ڜR TW(b 2!AIP,ٛ+%[AFdC(t8C P;xB>H> xoh ~ * DYtLZϒwїg}87zց"C(c/ph`,d}9nd)!N;BgTĄ}yUQ<sˊ1JÍPG`8v8XXdKKJ9jSkDg/԰&&82/5@=9a7-X,P8:Q ȢWdm}EFZST)c?1qE;|#TUN]ZT=$ťxܻ]%s.*G~OGSBy(QM%LyXzZ0f׷z~`*)c185U@@pbrК9#%pUUw\,QiIі6G4LJ,K̝]!+b1]gLڝQakz֟Vm6+[H_M@O]p:Kj"PtEvxecm[[Q &60SmQ.3pԧ<9ۙ0D.8$=CV$,*о}R hN=P.PF8Gb8PchhmxW\엤ֆ |mtQb`ܑaAA UQa r6E$k=`)N׏_^$+^FPY祶@ѱ]gtFCƤђB.S4F aT=uvµaF-k51u2 8HQrXz_3w*d-ʜ'Y}MuZ$'?Oiwp^#?li1b)gjoFީKMU&y%jҍN* ^P>t!,<^Ov3qSګ)i{[!VW%@7|W-zpA{Bf3M H#^V aK=lĤ #Mxˁ P@r /p_<8 < (T*mJBdwFvQuRg;:9WTܹ"87J)+O&4U>X#մ0s|H`r .3 X :]Z-VP* 8چܥj\Ȼ4'?9ws.ߘ䝀C .-ܫ_7%$g75 .+3kOnIꆦ&@0'(g5{3;稯1!hsLgD`Xϔ+P) z>b{Jr2@b{t3N]_U vAaK͎8kEd;~AqHwEGʤB"fo(2+Niؘ3^hFٔ;/?)ƌ1|nu{rhwR8\n8 ?_%,`3 Nd%j2ZJ={]T |XYP a5Q >;Gt\=~ а?'`n0?Yendstream endobj 548 0 obj << /Filter /FlateDecode /Length 690 >> stream x];nP^DlxӸH$ـD=*, \d)R 
1?~>|y9oa=v~.}fq}4]e}s۽폚R?/-C~ݝ~8um~:wq_?(3PX):{SWT `ͦ&k{)T7uϺ4S͇r7MAɠaWThԉun jMA%Ѐ3@4 84b4 8]SPwFg hp&N: :p.N: :p.N: :p.Xk΅:.밺N:kp!`A`\p!`A`\p!`ЄDAQ`ϊJ'IBb!IH"$ S$!1~$$O)Bb!IH"$ vQB%EvQb?%wIoZQqGk[%o[EoZk[%o[EoZk[%o[EoZk[v|SZ1滋WμUs@뵟n:u>o /Jwendstream endobj 549 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 9433 >> stream xz XS㬤yYu (2D0c DqY[uUZmm>tsh?h|<޵ֻ޵6ʬ %,V s8+5: vcoxs!fN̰>P 0 P;zĉǍ#3 [/wu nzUWDۭG[yxzZxZh`;[5a_o/P4むM 7/ (xqȒХa—Gt[)w_sj[5^vwq]ebͮIѓL;mgsFpQ t8o' `)j0eKͤPkYPj-5Sj5rFQj Z@6Q qfj5B}DMS% ZFMSSTj%5ZEMVS3T_ʑRbʉzPʒRVm?52R"XS2+eMuQ5IS7 5#&N9Rfz-2KplY+sW{KŸwt`JuԞz>5ggL}g-,\-\Mop?(eW4=^~˰dv֋Z?fa Fl4dQCF.3q1[Č;vXq!T۫5Lh!hugӶžҤ(2)}'ota|VM 6KFD'8J=t2%8L^egnR,P㺥g7@0hv3ȞG`.QK*Z|R727p)ddGf* ? ʼnh%6I u87!NCU S#O)t* VӅǺNGvցPZй ^Z |5s,y^| ||5>Ƕu-)dmWC6ė("o+}ɊӱÐFTK?¢>"uqdцCYW䘜^+Ch -6{4~VAU "| zok/<uE=00'gMΥEF8U~}pUTZ`: =x$l ohdwM `L/P(cTJK2t41 tjUjL9Ěh/m(? EAI7P7px1eȁFㆋSj~sfjgyCђ,HQ2N&!3xԌN {."xxL <` @_*Cqt!+T*M`7x| Xa[<O.M6h5!F֑"x 5pVh먠R܇+B$qu`x(7ӕβ6nG'l:NjrD6%RB Bh{Ǫܺ8;qHN+΢I("U$g8)[z6zU"ضu"37ґ/ ͪ '*@ V;S\Ekc/QKaXdywوE#و~^#fxGr{h~)ÚX99TG{ZPu!WK:QΆc.@ χ#^|a!1>,XѨ5MYCW`zdBӉF4ƨ7 [Q61]:y4*|G6(39%"4*`c0 o)}gYa__{RlQ\_Io.,³TG`pz~rv #_4="]DBPPd̩a GL0ᦁ}lDCWn}YXÓ4ABn igbz܎1T&ꀢ}f[ Jas-j"G-3PzO'vٖa41u.6:V L.~lOФdaժxڣ vXj]P& R~͈oT>gه:WL $eXJT@WT|}OI\!Mܰv^XH^L/Rtjo O aq|r`6^ Q8IBx5'aW슦ֽ{daܳɄ|Z@7^%>$~M)`B+#,~N<؟x7 ^G\ O YdBCLx_i43G0ZlzEU7 N~C>r^}-7Iyg|(1g~Q3R(PGs N?d 06"۵G dxieɂ#'Xq{ԓdx4=&gV]fmho(nc!zDVwZϾV&vmUw ~wɞn esLb.ZȥrCI&kBa _4١ *bΐb3T̅F(" ԁArRh(FҴh]R.'3S%#,+9dFxDtiy?zO!E_=RW)D4a |s`Qs_E'x,9lO V*vȑ&hjָ` 03;.+YQUTRTU+;]̱&<+mΌX1yk)f S*] ( ,k.٭43@(zk %a)~qz&EK_z7HDvDB:J F5a+$}ER2 PTDF95yr-V\n]p,/dO%ކBn2.Ɋ5De2?~y'0'@SM'3/kdnj0AEU%?!;S~HkP,>2Moʺ W/ 9 M]b?lAM,sJ!5CyUz}(qZ)%5&įU63RTvS&0C_xȽG2lqpgML\vM9o>HPPD"\3^ɐdN܃ZDW3&tD5_, ,\ZL hK׻T8K%XH$rhOQ$;q?0$+BWРP}ԴA.SN+;*nfw85WF?C&_Sj i+ҩIZM\դ2] G^o̯-huFX.)>Yz8C5gPȠI1&BCHX,[$8NF=0UMa3Lt~%l@O%!rSQwD-Gcd͓" cpQA?i Y3tN_Д2+!jm-i]ѴXLoEG9pW_=zPlJLhADeu~I9[.,^#+dRk0k>_$͡QD}R6#̜̊w`uGEh2 O'[;w AYl9ybZɹؚmJ7WV/a o0EqwfusU 
$j}RMPCUYaIc~u10Ϯ_\&SZDxRzBXfgnlAVqzaJ] Leښ|L|EQVoW0"Tu8,4j擇6ԳELYu!{/j6ʋѠEo&e_P<C:i@':\ ŭ_K ;FV[ !9Y=u\;X`DG5o! tRuz/,߃7{?=N*&K#YRwrDP9}J0@r#%|_gf3" -S68!->yf* E_u{4Vb:GIeh{JloH 5]4YU3W>V^r*Z/ݟ^3IWo;E.A 4VevgxMl"mW6Ƀf\p'Pj2qwbgiP,ڴpcٵW4PSt.;ЧԞ4mT~@]dTA |\H;0O]Fynp !ɇ5?uER()%yPTKAҕ'fQhIPoDQQ:('H>QCSk( J.UB)!z0+گ Wĉ;=8;(%[WIz<<0Ϛ>Hk2=! |sHIDmR}>.@frR4-UUF<sjlW$<85r|jubgWDtRj5 $Ԥz,LzRJb\Ͷ@]QeKjuUj4!nI^=϶~?laAxIsbY")w5S]fFŊ]c`1ʿW\pSŨFkY0V:sQ\t~, d)!N;;P|Wu MnhNΊb[6궇ԡ*-S0ojSR N6.Lf ATP@tt`JK,a8`f&ɀl^0n0]!{ LMgm 2]PvՊ7%ĺL[>qk ˦`cPĻ.h26΃f>Ȓ(Fdq~'8I~qJdRWh"b/VHds'L?º dШٲ2U`u:CE^H2x;࢟V /Dp:{64T$ŁȻI N^ze &n򃍺eWvXd4,`<4#UF%Ȃ?ձ[(˫¤kx/U$-2KFD$ΐnػ?̘sm0DVJ ak:v-lb9lWk4!ɉI@HB~ÒQxbgyeuI&eKC|'>[ד'?{Ȅ0ɛLiBZT m}Q^Igx(&8^;W"EHu5ouaL d@-PnGXx $yl)~}KLmqbZ̔*Kjk7䦩Se.;lMw,MЫIx8Hcu6ԗp[3mqאmA[ 5۝yϛ5j ;;ĥҩG#3=|h#=\9- * /J#J䤋V$.\JKH$R[@Xv$e-O-A RFbEē%D4}˛ .wp-e(؝x @z \EdHݷH*J` lgwA/?SðyF̓)/nאXYDV*`\U1,ځ݈d^IODd2fNá,R ;i&LŭǑ8%P8Ybߧ N5餳G̚?^sjrdc;O̠̿k]~ݶ-޸=ݪ=P\t֕ 6d 3.r#,=*zsDXJJIHKPX+WfzF6X#RAe#9|Zic-(`٬)!A^Kqm#oȌ =٩<%-w NvZE>h-qE Po7s[*2f a7.XH9ٸil~~< }:~ GGly ɺ7{Mj8},9~Ԫ%YyˢӶCͦHm2De &jƉeN.du4 7*qʒغ-(boۢSBTTJ5$e?<'&m\O1LQe} gQ$0 Q\_wt=$5O$(EVx*^Gx ^X!r`De2qCohΗ!%]ц;w5Ze3txl-ݲ '"uޫ_eiDlOT+7*#Yh [5GebJ jy?#;x/)}7?o^l6IyxY<8,4Td,c~U>t-BYCՊvXov\Н"q=;h5f)>~1r">J'U&cd.x-YQ̎Ⲳ5oq/#h}8=Maޜq|rxԤ\yRU*; Rcd(Ry`-{ 2BdZɕo1?/-+XTj4jgw;/w0;TS|@`,"wz=< c?c2UQq]J^:r9A6*jd+9$aC*~:?ewNxb?Ibҵ5SR )^UqqjI1īy)YYz}ۂ\H_\9CғkV{gU`XlԿu9M$##H VKV W@<2ۑ9~cU3ND⧵A{?C6LjĿvlhk&`$)?HX|ud[r|D@XNTQM|y]R@%+-,Dil7u=:^t=ztiz>5-GO_R( endstream endobj 550 0 obj << /Filter /FlateDecode /Length 324 >> stream x]n@ D|BNȗCDCICc遽̰.4ny+m0Nm)y1nbvɊ[|,)GC^RUG}(}.mLk;Sv`Y*Otã~1*T`mv;F{`ϸB@VF+GƏҨPh67 ,m gQQ(~=KgsF#'d09F7ʼy#(+#GPWFPA(8¾2_~}Wۺip!|)2/>C/Χ endstream endobj 551 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2978 >> stream x}V{TSw>!$ Cx=BUow-R X!@0Q@^$(o_@jqکeu9vj;vDGZvc_оng:k;9g?d2QMMs焮f&fOei9CE)K^gxwb$,L. 
ܴ +]yaΜy?4IW4K lC&јYYaʣ75LFM.-13UcJլibc"Wh^Y/n2 l4oznb~RrT.Z:}!sìd*f&ìab̳:U &`˜Hf13yy,e3˙(f3bx2zYlcMD~sN\]P`rk9n b{?=s0 >O49}o_Bet_\+yx\J`}oE -{Rb-(:EU]rx)=q(&Jr;eGW\2IS&L2ce1uįNZXW^Tе)ANvo;'Xk̀ 60fQ^=~!WbV  |~Dd`6P~ npWtp Bouj4MP[PRYYV*Zk}$x'PW|BUeOBN}"Kنmb{Sbk8A}PO /gdR44o㰅>ѷp,Aא143uS<ڻ*;+^=dY߅SEnJJ _O8ѐYMDzaKA(J\I 8)>_ GDJH(#Y|\[.`uA [mxek.i"r(x3zSsc\ȵٲ`VNzbu_cwΖfj݀҃@Cw|}1}??*O(\C%0,9‡,$MIrdG&,$}GR'"kOϬI5%}G-KzEunV:ֲ_vE;p'(-̇]WM4FvdXOy;0E='J%v@F s++֭~<E`(zchRکBK_<^J `tc&'XJy1NPP>s>DoaDoXn}wn;9Q@yHRڊzt{$Qe &ёfSuf%l̶59'?[ĹadNXb ǹse΂`g7׫jVY-:Pq@VŋoY|zR2⦇NFWO& LmLZlH6qVO|b?=PtӉcDMX<~dQс-(f*6e딽WaLqm__ '8J5肿_Rk;tȆ(X[ྪW#ҳIb' +a %sk8% %{Uw?t/KߪNs5{'Į^w8Ž>(]WfJUX2+De-zT;=)UrpC&Kv jǂ"sE.d‘rfv?[VF2PS{Yl`^`}{Q2KyyUM +431W,_,Sݍ7IvaUZ~nkZwDZQ0_ջt"%O35>V~Ȉ )6 7: qIƕ;wIFz)Zx6A5QeW٪/mfn|endstream endobj 552 0 obj << /Filter /FlateDecode /Length 260 >> stream x]n0D|CIh/%VU{8X9;$=0+=t9_Ҽ,W3)/،|Sմ&a}prUކO7?ӣf[ K{!]:ZKiS8=#FR#kHe-&%ܑ  AtjՉÞ]'#zx ًw:={Adz# LiRjՄG)V} Z*=W^lAendstream endobj 553 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1429 >> stream x]S}LSW׾VVӶ"&Y0+X1~hTQ)Da*#cNEDmF=Ҹ׸M~'w=!&-&dl INݚJ>)nT+b,Lty &j3o4mi&1GaLffQ^5f4jf34i )sfyJ!vQL&2fA;+CUcа- Kti1> @MGȀBQ G(E`ĢQ4!4ID(S_S;n"nu4CCӭ?"Ft=޹UAG P4M<+fɊ!^ ~/ Rȴ?a;qfh)ݟy,J56m^/!ppa_ނ^ͽ;ȓcv/}y@Td#<_?SCHR)`bY>N!RX@;Q{xA'Òmܩ41-k;a;/v؅𙽧w:dRS IzVȤŁ8fD;4qcag**q PziUlؔeaʔܺj(id#|ZԺNOˉ*P1ʳUABОmרJ4].4'-TKȧ<Kk?>+ЮJ_ƙ쌜<,]عbELZɻ[gB}xwKH>*=2K\&^tR?͊+ +r^l+ S˺ XIDN< &^6=wGxB*4_ͳo4MxQ0Ѵ:xTȔ u2f+}1~ӵ%ǔb''_]/h| 7oFJRBϙFn]?~Gu~ג=[r^|wJ)f_|_mٮ?+rMn׀=>\7H?!&t8Y M*r^皡Qχ9*]<|IGd;a GlJ]EvÅ#.Ez zG}6zsqDn)F\ħ<F|iEk&KpYa>vFy#׏2B@x'ozpjWZK̆զE&$nLK~M9Hm8q3'C:Ygb'llTaj\eUA=XpPJg@`hA% h'$[k89\EͲ **Lv؅(п ;endstream endobj 554 0 obj << /Filter /FlateDecode /Length 461 >> stream x]An0D:n`&'MɢED^Dgwft[{"wO//~lWe~֟e~u|nmXY[|߇OKuj0mX^[wJvm+?8ϟޓuNJ\YN:8T'%)䀘WCuXUsuXjkQEuX *P !sGٻչ:gUeffeffefzTRJb"Bb"Bb"Bb""P ,GA1(h8Qˁb`9P ,@r@ hM6  dsuXyNA1a9!9A0Q(H& iD! 
M(D 4aQ&L" ҄BDO\u⿎׭ǏmkwwIW貴qzgendstream endobj 555 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5800 >> stream xY TSw!KjS{.:.*QB,=KB6v" **.-vŶڽ7}N?S;Á!~o",kʦRS.Y>+:{pg&3;0q̩Y3/ d fפe-Y|"nS7,޼8pKZ.~8?-50&>1:y`Bo `o N}9mMڠWg^!7z0fS^-&$n%<7oE b6B!ۈljD(1A'ˆN "xEL,$"5"b7XL!?RbXNl "&6+b&A)$b2 דCԳ^e v30D>@zǵ=|Œ _N=:iޤ/O)|7eZJV>s %X3㻙ͪ#)&:7e>: XTs"@*17>z)JtyF7rCHJPB}`39loHȱ3Fɤ :y]TԴiv@%ͬEE\&|V2gvr(A%mR/dZGWAcg%%  7K"lgɩ׻LT[U'@6@LE*洶PWZPTYC)\cr@_\Z{= "s6c&ci _r_SooP9,J'Фw?ɷ^fXs#rXЪ43s萒_ ƕ4fht2p."ڀsn'ۘwO,}-{ȟ>TF4v,f-AMțlusr"dwT?td AJP2Y*Sýnv ~!.0- A4ג_ӏB|f&3̨]@nߴF>Y/~zK ә I^5,u \4.\_\m(Q6 [Svo_<DyN:,z p>0^ \pFϸY/kpR`g)m Y$37? ZD*]2R#-h,_CKx)%)kgm@S8OYhк"4ťJU N$>JR]@33-zK(-@o0## n$}aFȂl3ۿJBDJ7%l5PWP؋MP-NiPDJl†JwiO |>F}2/g{=ܻ.q}\4@ r`uLp "|`J+&MuXoeNWDeb޽3wW+4BMi s,&_GObAY|M}y-g/T V8B ^o)ZrRfίhp^όy2+B꨼Wa DzNS&8P%qdGn9'Dԇ?_ݞG5dž\ڤHk@@EA1 Hrev('T蜺UbPce/RҢvH\w- =hu6Ӷ;X`6ڏs(N;6>\M^s\3jvR)=(479ў?DKYogsE1Wpo:g sHV@̋hS~5.Qn7n Z\F.kz x56  ѱcPr6ج3A; =ӎr7:s4[3W.$[,^}}ϙq Kgo5_!V0;?&;\*6}(}*9TU:FvFJ`ɒW wguLYL[fd@:i hg˩8nNw׿ K?*~ ٳBfk֜8Dwwt3T`!W) *Ϸtq=8/ׇGN-f"VJK-R735mBj_̳KPAZ&UND7GtCfәR y^~H '8fk@!Gq\ZlxG4vjQ3d>u9dh:}ĩN7DETq@;Gt;zc3B=c|;CHݑ[(/f9/5ЗO1bHw+YKDr@~iVedCYw8x[WKUN?ƾq79>jv>j:d:d%Pzܠk4b:<y~Ax;g&\;jhFRC]]@ޝox6z<1HűR#i*J,!%`yWԝuK>^Ɉa@Wb6&* Is{H{`]hBS,`7>/wJo6T![5>wI3Ϯq vPNֈy4404ڏ_#D] |7ٞ&]Jje| 2{ϝn[qX[{e1'j\]':7ޞ_9z蓿=ڙ7GoDi!N6h@|3t1 sNb/ < 9ġH-O))KKx0Q*n6@\\x=g`C>ʀḷC~'o݅shN.p/ce{|DMEGA.,8@ E: =?w=B*iHy /6HkЮZhaNJKV++d"y& tǞ8Cmx/Jk{VYefyn/guwOD&h43xH Ѹgr-KXVWV"*I!htjm!y;x$_ߘ dш~5{B*L\33gHYF#FWG }k*kKG! 
z/56IC_1ʹtzsu26Uki9-xՇp,d9sWyfQ}y<Ԓj./sGF>q2ojvyq,d櫨eEC㤛19<%_ree-:&]y%#9?-X9%~ӊVX; 5Z(H|rJyy}}uf]rzZ7w`cS?pT4{4^ȅ܂"!xS9qtW)S})FgO1/o"gCqBAHLUS&[.11%z'C:~VPU㬰#hvPoln=r\|~AdpOP{'ڿ>›5ע5K$"M`֦h[uXk: 5p6J4gKW)S+Wbxm2k踂9/Z#(ġlZ BrZ-?Y\(T/ KI r.b5X <,p~i*zצO{?;mN2zـ/Z|E|fA5|3EYd*!r|qWmzIRdAvyNskٔgKXh ڇ܁\hVdfƅMBS/omS JMw(FN'SPqob ̰psY*,SX5yv=%/[jkMF;Av6<$LV\hR-zC;ZKa eh"Bk|n  ͷJ88;me`,.2cѦS jJ#3c]/2qi#*.#dҌEPc t`a[ P䏵x$ocH|5+Ԉ ~qJv P4MMҚFw@VY@GA9< *#wFEE= FxON޷XlYt9@j; 52;ڪ씫tΌA7(gNΜ9Wy 0G,2|GW.SE3mO=@6yߢi<#F+߈F|qˈf}{F_TEJ$N޿~)C8BoǻTEG ^7yƶAPʫU:GM# UZq~BiX+ O2/q]4ARJRFe~C}UQӶYMVzAa?qb):*TTJJ(YHaL%PRDиQUD /V\oD\y^lCC΂!`WA>(9T&8ܧщkxdq̣T" w֮90$f*C3x '^=*(o.7޻v>huԻ˨;kA_[5Rfc6qvq&ň{cwS&_/(,Sdfc5.(& ҪD[X$ݺB%vp>žNvyCv5~̎\F.> stream x[Ks9>4{m Tx:bcז~ᑴ31;j&dn]y{P(Rh;:B" 32,¿I99ӯt9zv}rzɕÉG\Bj[TR'`/x"(JqYZZ^Vl5 '{%K،('5Ӵ'a.uFQEYn"g_/;RJV/e6z:5jn'ctYһ)*z٫aeg:5xi*T~=;a_<(G g '7UF2xQ\+;xZ |å 2\WqM e6op",iG gY8WY7ِ Mʱca0U mҰA?Wb\r )h2:]c\!]ϔugdgmr&*yiu.bCg,ZjϾrr\ga}#0S[yP[֙Ǣr!=na?$=uRd,T PtD*%z;mr|dun~(V&b];o{\!q7> ". MjU;4k#TbKutr b!َب O'<%(=%S-w g ln[mi +p(o M N/Y%/줪(x5M%TE@88\ z;K ÁB$Muj<0Lqܙ7#%_!ة_Gs CӬ 6ݢ>tRJn sh]6gsgq)@X;5a 4< ;HC, :arX^wuK8<&--)m\uN{~uޅC,u T|?iX `!k~i{+\G ـL7FtOK=*3u1rv >X×M>Ide}Y{_g~&(G%?!'Ee=Ib LWEU Z <Hl?4ņ5a`_|!rDP,E1 SEscz8wT%=/C;G0 ,E@><MP1TتɊiQM j%J?V$:Xry=_#Mfj!̼EERJy#g/kx3>-W['VYʘ[u4F^ RYr!S` m~w9CH$A2K DЧXϳKnB/O b"tq}=braLK%Zk,A8r/ U1^E}̂#$!UGL)\zr8毫}DVpr?⹳#F16e@mbUR2/q Ϩa16#t1PU; tCDC,*N+f %; />n6p})鷤12bX6Xljq_F+Į3Y1?=o>{ZLnxO/sB,WZi0B 92-1-y< ܣgj~Xڄx{8 G<_C!Qty}ߞ\+)LcQ^"iYa0Uk&U4HlCMm|ef ƚCAЏv{;# Ww Xw-Lr|@;Er#nT]G9?J YXa>P44lݣ0PP1D%hR{޶)Z큒WT t+O0NasJjx0W !3A[pᖎTr#\%eSzNok~ sw^5A>= Y&pDn91=qXֈ5D]RIAI2Ҫ|lJFk y(fty(XeZ_W0t]Y\\4i `Rfl_a]oKK𽜀upS{|_UFx'ZqٵxS8)lr3|s dP>tMS}ў`5AGbqaWT, Co `A;  \ EjK)aHϘ.m)k&|r>& Tn֞Rwf i2&'"ȸ8IGc|;Kz9LDad^sK:XA@_0QҲ$\,? 
Sgʍb|imHRE?w`p_[Dw0,ú+֗t7$/4;xXOq,lB #L5*1GC/vtJCr=/Bti3ͱ¹X9Lm #/ yA+(M+8 ZګDX6`v1)@q3`6t[^ Uk*M[yj Ƅ-a ָO~u֋\7nЭMt)I[9Ne.zNJe&5ǹ> stream xZKsȑ9/kW ꉪplPfFZRIΎ m=gfuS#m@QϬ/3̬ ?L/_ۃp0[_4:~xysC3%N3vv[+ge>~&g$ق~ܱYZf-fsdR%e_J$M (Җ[cY]ZxR;;6ㄌ6 T_`k=5۵XOR΃S8 H2s#ùԉ5 䜶)YɊTu^K9>9"dF:e^xs0aDS*웙!ܰ:_J(8 ] ;#Y}ߌ[EG!LG_~B3+Q 0q.SggE[㹤\pvUWQ{`Z{} me(hyjm!gԧۄc1% {̲DdL7by3 e9TJ/_siL" `D+@܍0e9B+ 6ɱ_Ivժ\b{ FRyUƆ9o:Xpޏ {l'BMa+R( ږ @S#G(::aY]0P;8j5SZ˃bN֏UvULM34d\v|}tTU]$d72a)j e{J#_.6y!$JЗu"t7!IF?P mP+pg8Vyٰq3ԟѲ'mޖmDۢlMWʯYWep݁7XɌK4}HXXrVh<ŝrv5I:ۅ&؅ Q? gX(Jr% ,h9#clgl".= .lFrMzkS~~pHs0yy j5VcEYkdSA*%S(KT  VŃ ) "O`0mS,Ƹ orU S ~' 4 zr7a^F/A?spup d`ػwT\f/ZWrуgb ] 7 | 4 r Kq߄gn,<119xxxHVEm_&)GX7?Dz#p*m]qy.X_٦z(MUۣiU]!l~nW l6P :NI8#J`/ PUt PoI8i@6n"64Dz gi1]D6Pe: ;-ll ͦ<1F F ZHM2h :`c1TU'P/{}v?8;u ˋSa:7ҔO+oF^1lуKI M=9 33vI.Еuf:{6u1oƻ>+^GFǮg{]cxYNEn3$i.n:'D),nA jC ٌG=} ך_ilm͞uBञSyɘdHR4Ryjt۽0x0T k&\Pdz p;7vJb eD86a&rJ`!dzK?sV]<$<]m%l`A~ ;΋}f×Ku|b̹˟E6 u[ԯmݹHes;E]bvvڻ4PŇhmڣP̥$| Z"ؘAʜ9T=K֢ɺh?+GHţ<V\ӗH_G<W˘;4Po=1y.dFw2}{N9*˧abȞ5M"b?Zn!L9f-fMD g(BO-`lB;[ 覄Ǚ2pS׶[ǑK^T-&yW/#&ŒۙQī%$pqq%57hc%D ?ΰ`gr-L9-lᾤ)8n!{t|>mB֌]U}VH m߾<>9>~y$G,i )uD9פ>DN黌)e/aeŏBZRu=|g"9{H4. Js'.9]Vq̂Luo:bC,̣iS\$\ tgN3?e%Q: } a&@@JvM-yxF#.^>N4-/Lht'8/ݐO2Kk|7„y̞2 0GFt,Z!ј{5y2b*Rq- MZo4~E'tfȆ`4ؽo@H #>qGe}HtkCrm݇/[\J 6>; >[#k%X$R_&3l,\qt3z/œة35kqj*i`^Mi,:o>G`!P> W".bF$ڮ&ThĴ*>6*S Nd8ESnj>YCyuyUF*yzܥZo*f_MxYW Q`(ktJ>ƷFK ?!h1ӥ4]QOaIYܧCwg(h"T}+fm*2(@L!7 n":o`9nz [p󋻜rۙ]jE! @{(쫷>:cURFuudww`<%)ńyD y_|YF nSj}ˇ(|٭tSmNHӥ4;˞ ĤeOv? K&u:3q%:Ĩl!Ft/(w1n(D I,c|yhWUl/T}-ݘd'ovJz Jj1t:0s! 
W`M6t'w"YOtx}U͍ɛ*FoQb}yn% ,2:;Bk~yz| Lf{hN+haX[=y=Ra_yѤ`־TFftɎ+Kf'DHIdqB$9'ۦ]7E]4G(}ihmo>+[8 dj w0U莝~RK 8^g u> ӒtͤmXfX-1hG6ewJUvTâ ŝ#zl5!՞tOB_DRmh%î)0urT6s.Ħ3PMr>ǣBkl a?iȬKBߎendstream endobj 558 0 obj << /Filter /FlateDecode /Length 3251 >> stream xZMsFrx˰1\N\qٻ$x+ 9o=CQήA1=_4,\q&Ih&]e᧤H 9:}$fI9Y:xKZ8˳DSnMzY!TXoZf&ՙL Qۿ.=C'#k oO}wQ: ,bդNySWXYJưm?#!~ͩXɍ&s*|GnT"19%֞bS _<&G#ou=|Hp'Ck ^g?e~Lqs,~7]=ϟ?Gr>Ü'cf۾]n*3IFs5L[;jIk Jbq+0K&pC*"`?sˆk xqbµהMhOg_#_9r\Ov츱?0IZT1/({F%`~N%Ij݅y.m҈|.J+UUff|*ssveȎ`\0%+vf3˴3M#(&Xa+|'0X.@_?n"#2}Íж|gGN;|>àԑkԨ>4S8&~m~7.ݶ.8jfL"bA\-bn:řԍ dw$#" ;ڏ_B[&WH(U޿ؿb¢pAetV0|Q^6S5Ȋ9bɹx!Ҁe:p|Wc 2l%ҳCxtc+ c"8^%&Sկ0#+.V , =he+[٬1 XV]mup>vatO:zD?+P^m߶-^8tXE&{%S R|u $&@y߮yb<-0?#f ]{TJch{۵,joӱ]SuH9LDEsLZ'p&0x3}2%U|k j0s2Oxf|jI<} uʀ \uL1/SIi:?j x}̈́NYR'dEzR(l;""p]Ȳ^N|Hx g/`}ȇY!T[#81+W޵;u\q9dAeYMDqF* [iu>(`l{> t.$N9+nTЮ(*' 6v1ɯ`]g}MExd "Z-d^mr4im8HFۀQߵ $f\(7c%FHI3x4Kºָ!9ҶL~_yAQsE|MVi k}wT?pwU׿r7lwi^ tj7߰'0,~6LԎH_&<1OY;@~%X5儇b`x4*ѨBo _# xkdfKaldv9&o K8-Ͻ'UNCwN%n_|d6{QՇ^>k@/\E5^Bȵ 5^;.?}mn-Wʇbl*wE{5muWhvjjv %LXqOE@}8bO|&&1h֨DCG6DTנ[OlDa໽5Y nB}j2*)_{G.GWu\y#i#N^endstream endobj 559 0 obj << /Filter /FlateDecode /Length 302 >> stream x]An@ E9 CJ< !oE BX@]> stream xVmTg3 1Hh{Zuun[@-ABn2`T~u-n][av]׵{_Nvۭ=3s}>*NjQTcF-ڐhOݐ1mjآrR7OA*yw>Y`W xl&\=VVϛ1/;#}fsSN '__k'R DMGg҇IY}Ri4ԕؘyKcK.l0n~eĜ}#G^ŽQy=#2 a%ey!ʧVIlwlDA7$,m{pTR\`Iѭ$iiI< "0B5zN5~jȎݲQ;0K݌"〨AOGP |<j%%+3S6Ck=n<\%Uuhw70JJV˳$?Tb`w F}+lbn!Z2lL/d[{჏wk(+y`7ݱՃZq:CEEermJwWNS)QM4p !r 'xdЃ {0P#ϡQ݂s>!>  cZa"XFй\^fIqcV ; ;A#]mgx8v3G/_ `Aqtk;[N[+@p@OQf_e8z)83 HJXGʞhev~h= hA#ͣ6a9le'QaUS<Ǻy}*,tlWv0 -ZJ+ld& ^C '7ۋ#5yTWQ -Ί6#iw Fe=dm/>J4=LCO)+o3Gi/%=vV^\kjj9ieY;Jrw^gEv}׏a t5endstream endobj 561 0 obj << /Filter /FlateDecode /Length 362 >> stream x]1n0 Ew7b[T\%ClY*<6gt <[>)Vyz_2Ӱ|[c*=NŮ.1nbn)k|,ā]Rv8t1szY4 ~5=ϏuT9D`R Msű)"V X6JV)`kÃRa7@UY[e23) bzC3){{C\"Hx =yLQ8II [kA0T`+Y) `&j` 6~` ZB(`ÈϧeznQo뚦ݲ˼XV ՚ endstream endobj 562 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3901 >> stream xW 
Tg4JCMew#+[EvmViYꅦAdAԈjb!1Nb1Nd</罙M$w;~ۋH$~k''F&(#ҧORFyNM%(/_zo P͑`8AI%&d%ŧ̘6m)܀YKڛooB@dRtO$&%'쎉T$l<8$`eЍ!sp| 0bF:FG->FLIo V,K+c2(_Mj)GVvȨs %&ȏE oS@o.1R\.|ǀ y^/ E[S㋆_'?%6[! ,Gfr͝Ʃ+7^W#\4:`/*cWa)\@R+4PAkeLy]0G\j\q9@5QW"HD ڣp><K2HV 4 АC-Vh Bv$mr?u䪭>uyq8M&z%c{(na18,tP_4y:-.謖dWn6_рKR)lT42=xk8Oڅ\܀A6]% jAȀ?s8؍9;-AG(Wa.1F-Cb zuG֫$4=H&%>f8=!_6UE? k~-nJȹs|]p}]t3Bfwte3^6ma8 ⋞i[@;9&-0;ZB?!RyJ(Us^f(<PݎǺ.5uGpz~,'DJD߇hCi(Q0U {hm_|! K6ؗi"$xW3PXֲQ7+o! ѻ5x=!J #Ԧ;TUȋe_4q 9Nr@Me˿ f#oTuʜR[1Ԙ1UggHq1H~iJsLP겒lG[NJ PfV +*Lu9j^S4>#(?D_\jLm^ϭ>^Z-2j!г! cђy`,Telc`2V nWU{dh\~t"n'=G1zˑNas|N5˿(6 { a5Уk4C>/RDf"}~LKHلg$HA=?C$_9\l>Ԅ=>C3QE^w];PhqY&g 6ccb ?+ 7 x\3Iz^~!߅2?ۀo8y0p=FR_`$򠙍ö66pq.@q/^~LD4"4 K%onYE|66~*{O/1?"k74x.fWf}X8k}$4ŧ@)1_-VrȒkҕW6Gں׻MO &ۍʿ:[ˤMRMZ"=`4Y[N~֑Ծcgk0Ԭ%VdC#Rhej9ls3n+y [iF3/w348cmz6IF'K͉~ƚ/Ya ~M7zź'=%[q25a}9dLLgm1b3"3w&!9\#͚U,A e4eq굘0[},J[gKpԆgDZB’tBâ/2~:v?r]uM5k@U][4`xz^"| Ut88uؿ(Ӭxw- ~AЅ7nnvaM$r.1^9qB6{$ݱfS-T"o͌Ǹ2rr̵Q(ҨKڌ|@]elGu];i&m|mO#AȤp[MKmuuk Ϯ}jk/hW՞lmV)iHcPNu9.x ̴|uJr1| {(h#om5WՕ66/!E}:}9EAa"lr\$~@WoȝE?K܀rgCUm1~Y}T@?p%+)EyIՏC@jஊRKk wpagJjh5ri{!> -YKJ=Phxs2ܗѫn4Q5؀+*n|~5f7JTV׮4tx!HҊv $"0 lZS‰@֋I399EME5==eΆ$x~q~f*)$N΁pBy2[V\}_*ݟIg-u(:D{zIܦB*Yc,:\-e2ap H,[.8I?4pIXv|,4GN_U$?)Go=%YO%dʋ8y0_2ڣ==b`Y/Rguq>[׻hc/sue-U9Y$5d;b;ҧ> stream x]1n0 EwB7Xr \%Cd4DgKIt9_rtQ^R3^SVmc ۓd/9SP\v~7l>8ȧv75+卑c^WDTS߂;D10ZYZD='AmufgADh "<2 ")I܁B ^_5&Ki>e-$ !Ҁendstream endobj 564 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1559 >> stream xmSPSWIiHL\FSHa;㮻 )~-EQTI\ɪ@U߮?@cuVY)ZNѮδ^f3{O 0"2s >~'!.hiiA \#RJ" sw`f$ K|Gt.1!!).Ͽ%F٤+,N[mEnLWT\ZyβQWN2'Wo}3Y'~rʶť9-oCXOII%$d$'DI"yDNrɘJ {5ly+,[vJQվM͇̐d{+%X .6>n#g9`ZaQ(WcpKe.3h7GХE؀TiTDtW U9sX,ʯ@;'N6 !å==zذsMyMڹ+ p :;߯.а_MݺXS9IEmӎbn{pg_ԅ$|_p0*'vzbq>[C%kX|Lu,c}hqi'?誹Tڳ#tlN "o1`}""2 I,% #m~B2ImsΧI)||5A">471hV5E#~EUǵN!/obԐ EYZu`2`r0D *\œG4+f8rX~k5]O2pkQ~j=L<*)=^v~E66)TnⰸE W{􋻓}ԽfjO&ytl'q_ˤh`;ڶޙb-͞w1)acSuU{w~l1 U9A1(śX;&D? 
cܴATG uK*֖v󂲑( \>#=cxHvF.(jrQ*G<\6p߇BC0!,ȶ3~ )[TZ8IoOsj}M1WMW9KHf%(O==fSm zХ>j;W^.`Vp۳XuQ;auX[\.[+h[`5I[%CmolCၗ,ʗGK9]r%5+_ɍ endstream endobj 565 0 obj << /Filter /FlateDecode /Length 657 >> stream x]nP=O7pMɢUD,b!}g.8e9݆yoqﯧe|Loe~6]~=g6__嵯DZ=m՗vqc|tiqq4[FoDgA j7C 4is ΌA<0AǦAѠ#ИDFAcEfMhFgMrV5XM^`5y^4 84hL"ȡq"%r)rh\"ȡq"%r)rh\"ȡq"%r)rj٠: :s5lauyހ5 z0 B 0 . 0 B 0~$; 9ohΉ}9J(Qb!IH"$ S$!~$$O)B'Q{OdG \ &p)`'Q5Mz֔7MXSޤ`-uT`-yނ-z ֒-XKޢ`-yނ-z ֒-XKޢ`-yނ-z ֒-J"+qu uڗe> stream xYTT־Cs 5bAA> ugw;V,C4XFMQi%/ggog%Xϝ3=gDss*?^OO߭ p}4QF1P? 6wOw`ys̟5/1s 7[3jo oO~ͬf6Fnzguvvfllj4~k8 \/12*l&u]muaߜȹd,\œ,}o_g;i5BMRQۨeʖJQ({}j;N9P3j&ZK͢ulj7PFʒOYQ MBʚZDPQŔ95CQ"ʉzrXj,eLPz)OSMKqC ̨DjNVR#U(j55ABGP/ Mk65h8"nV c1CV Q u˰Úo^?b߈ΑF5vTب/Fј#FFDYMv6+a_2^ea|dIrqyKMxw~͍n=oI~lI6Ф{GL?h_7ZFH&h-}>EEI1Y!( |IQHGTP0JLMLCьo=*~kQSsrjɆaǷQ,6?O +C^eƢ|^,A3D?<|TʉԇPn )!գbiuT# n- ݏsuνN0=K\.BE_5qҬ*U5ԩywaa~f}=q9l6^}s֝7pdXWu2-A(6>r f·0>xƕKMiEUb KQoxR̈L(8qS''zݘZP(k\'!?|"֯x~']v,e* 5! ig_ +P b%(AR?kRrXgӞ(L=Q-9y '}0G5(pnDA^0[_L 29?o猪\;* &~ 1Pw_fpބSk-(Y&O圫zYx)w$uLIw'l| \jfrvZ:^GM`&!f7 x&:Qq!TT K4]4zG[cB~C g`px|X!/ }Hb5jO]a=ٻƝ'}a(~ Т0DP#FuQ!})>DJ/SWJ@*d6kda(p^erjHԬ$gӅ:^4`m=2+~xN82t_7(?  
ٹ=FG~G<1 ȟ &g_q`+{ׯF7Wjh Qk,UblOU` pc Z=.0t$U1c.g B)\=@%{e>Xe2l܎iJ'5uP9(S FgG&.9`keMW7Y Eud+j ,p D{s+Bu;Y&B[;~̯^n0j&U۪a:Zz5chMiX4Ͻh(UQwu+*E*62>w(m6Ď[fx+Ndnl_=15\L1#ơԣyynX4o5joPVf)j))lXuvvؾ%Nt+S/س cyiV(Din-xݥE/Om|5JRzk>' B!#NKI|5  Q*' FoYϪAve,}`)n)r9|iBVtq -e$V5=&,)7si^D(QEX~$aDE؂q.wl8oH>VVVۤamV]|(;I4sywo&Dƹn0yË7pfy @g߇aCݹ[C5xXӲ΃Dt[a&mnZQi<هo<{ 8x=Ј {[P<QcIP yʨd58):3Px%CJOZۦ&}U6NEz!@)w-EȴU60MTY; 4/cQ_O$m <_\ar/|eA!ISj M\[26Egk#8,JwTF 4c+>_#+4Y>hgufC?qO=_IH׷I!eU/;fr];Dyb}P##*2#2Dg'_eH͑ԣֻ֢|IAo33oI?.[Q, hV6iKFyw??#״44c0z`u +΢[+VXPx_߿wç>#K)c˪jʔIE9\əWYڋq deKZ]Z Ez驝 ,"%:и:1с$T?/(>l睺%3BHOIպ,Y'%bE6: !Yɑ#%Gѩ-E0T}x5,eSUtS,Uj]577|v7# D(IuOMBȏ0-w忾qf;9V ge.lftHDްz~7sppqaOè46+%gU5u_Nbpy͉^/a8 -mZPqq鵨q_O.m4׬_S Nj޾ A'xx4PrPFcslcdȫ˂6kT+ +H$O^rɌwI DwVnںzWa |6k8Qӝ;}nq1>ZX-D-;ժkfM8+M6gn4" "lU*W1x1,6k5>ZwX-z&֟Đ~>.^,k?+ޟA"43++1ᕁ_u12!8Y;wO*1I׍ă1co{J||ܚknu#߼vT 9YŢxJ-e;bP4ChCUmaCxh>צU>Hɹ ^Uec 08JRu!;3?0ʠx佋&sH&TWȊ _ m#:wxW>+{YiϦXDtB;5N^ &w Z sհWG;w;n[ݹp }͖GɥOKMBrSiidEmNyA& oOB9{޲ݮܼmmse11R򦢢r_-=néS ZEѹ98Xy+)E5x Hj|9|]M3uVhapmn|5-sC-P Ѕ-D;^(1^YF3-D-xT7()k'$%H&LV\V F}9a ,*g_SQøoJ r2Q Taۺ)hYbMISd-j3ޅc$ϰ 'z':ZyFU w~RNy6b~>#)> stream x]O10 X @p' aKtp'w'ezv =(ul"-~H0XT5 㬃]'l3Uj75O$V?p^@Tr48o1Ҵ4CN-,S,endstream endobj 569 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 364 >> stream xcd`ab`dd N+64uIf!CO/nn}/}M1<%9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5P\0000103012_i eO_^Pw*ˋ T\)S^m. =wnw6/ i^O mӺvKc?{oi~}/.cA<_qsg.a][%${z{&>&20}Sendstream endobj 570 0 obj << /Filter /FlateDecode /Length 6289 >> stream x=r9vw}ӁsrG/=DLOwL0vP>DV.=Y2tODoxʙ8/^x"ӯO߄6 <8Z2é3eNoO.??řR .8F9Caq&qݻ71md]Lsyӝr6ZD3m]^Qju5~υ)ꖫmhdwSؿLaYNsY^Svޔ/_ "SNδg0o-zѝ߼PX;/cs_>"<^>tvFloim4Nm.̕DN. 
E>om=a?kKs9]7*bNq+x a2ZkoeX RuZ*c>U@YoF$B0\"OH[䃏c.qJ80gB1C4:mUG⊄l \TIQP}Y'L m7HkچیNa&8 DҨSq*98UgFn9T :LjJˤs2L2 ȋ,g"_z(PmR04@VqQIaN#L'0m řV%fqvPqڱKIo6 rrW\MiPki+h_q7l$i<)gV_:L!Em8 FfۿC_|]GPVjI/# ]6Swj'”'pT\ϔ.MS!Og3wI^>-LӤ1A)^AD6*5e)7>ߴV5ڬo7L+$OSJVdC(O Gb}l5v`XuǨg vbWθWSe"9=LfZ?M <98wRm[1|B@7`W3- hID4FRKa} _J@ NeA)Ն]ϨG:RY L ̽ZEqP%H$q'gU8IL)mv&ak[:v!ަ.+пY5: UEoIO wxPq lsݚ>C2T#Wx ei0c^~QuՀCk)h^~7)lȎk:CSA Y1D,7Uqx#jf]JZ L2ǀcSB0妛0Q&6ӬbA:*Fܕ }1+ˣ>K-԰'\յ*ү7#J,헻_#"bTB(P8un1͉v֍d,YV^WTh+yجH9vڐ;nhO-X*zqP墔ڦx-?z䊀RK8u4+v7/ O%taaLfouk*g\L[52U(kK$Bj;}*Y]n7OUeQxHn O &[f{]Jh2Qx] +5>.&ш!&13˯3M`]Fu '۽qco2y.q7sb27=uHZs$6ϠיıOd&j_4zgG%'^)'qZ wUMڦO_Q 20 XۣᏁ& ] "QˑG^ļVSrL2䨧+']Llz7X%;_Sy[ff]s]fRL=S><;|Y \MH2lZ5l|r՜ϲ=x՚j!fw c i wL: %W_S'{kh?ԞrԵ¼u&x rwga|]7Q 0hi >Wޢ{3oF;ݜǞ-h?ֈ8h5$Dl[QV{`x:\@Ug'jq !-%^ɭ(I'iK6e!;C~j:MW?X-Fg"#y1@)OX#>#d.f1$ʨf(z TsD8>t{t}0CdFع"6أ+E#p:1[JY}T.G] H;uǀyIԼiQsz@v#4$u*?"v1{NRU9aT˜o>$`* W1623$fױ̳Om.>ݐ躿}^^gHD>>j&H>h7{`V呏W|`ֶI]lL( -ՖF"2 R;)8&obN+-Q* 7izKfj"VP:5g@chW!D`rHƜOib<jNpp@͜u"z3m;Ɨ~]ݍZLk 61Wۤ)x*g3(VdN,wfC9ZeVcnj| .wE/̆jT䈑1hc۾R:w\?TY[ZWwMn8p F[S]>(*@Y*+DǮT|ִhv(! j7`#K^Mx ^as"UpO4Qn_X2!Nx &硏t`ULu&'܉pcE˞>N~`Bw\c:Ho?\6;N1tt3rz>MXi N^F+99Z9$9M,+?v bxMYjG k?*5ﳭ[J4U8:e5gCCHto˃:rtHr@d,E͜\1)+ S:近UmҝxUQ@,)RfRD&n!cxa>̣h*r(h9TQ9zEV]wCynX~*(͝hOcPUu}b8YV^V6xSoxn=.Ǝ 3a?7gz+MzuATC9OW;0$C/mcyOywN{TɕR*ٙF=[,URōUkt؂<6\~ۣAeUz6 9yǿp\{?Xa\\aÍv+k"_Suaʔ41яyV5)w>@Z?-W_)n m;*v-/q|ym+RrB%Ξ7q+{7Þ:fTX7V&8d bgK+ևxE|2}l!(ysT_dύ= 3CN3Bes!ߗ`t||k33IhKOCJC2Vn{Bbr ̉ևl> stream xYK۸7#Z1xC]ut43J,űw} T0_w %[R 7]-~]0vm˟֋|`R˖ۅaˊ-2j>,3Ζ" RsXoPw*cI;<}).-<{'%-QK8>Q>ZM!KJx+"?+)UI&D9m,UJWۥFBEf L%e/1p6IE]oXImp&n9-U%+JkP onD$~XwqS71 Nߊ(ѵ_Oksnn/L:x ɔ˕ӭ2y;>z#VZ$VRJҒ5AE/M%Ej je__MpN0B jl% 1wH/U.PpiaG"FV9c/)VP$Ӻ wQkT L<:a*s:ZFnF`E.P۰B?"oAS< 뽷J#_VP}1p2[^jʿWhly 5-] zR3\Nl0=Z,Ӂ`!7f)bCl1 %s8,w-Yɫ0T6'ɭ"91=y_S~b g  <&B۰ jT>t:R}g)tn7~_n3xV4"IprRishti'Q#*jd _ydem\Tn[/~^N\-b\! wMxX?X*!{ ^Xir]TPBOMj>5a;]kj]FYđ͍7˽ƅ B|bN[*ROPS ~J6F@˂t_PTs'[Yft{FE6}Fǜ-BY.) 
|'AC4ȳZywv?cL:UxZ0Mf枌0S9o{ b .YJJ8ȊYeK?YoDlFPj;8xH;9ɧ)P>̭%xN{F2(Հ *y>CR#T8`а"X1 MQǔٵ7MEͳ"Ɨp L%}J #_; b*(;Weq]0*n8 gI@JH(,9%3ĻJw:\B΀47NC22:"sm鈓МfL#?>->tfw A}+v!|vkol@%4n ̇@;O9z\VkWkTkh yS`5-\s}FO~3$Snh1skf<2 2~XLw w_0iPK>7es[3npOqQ/eyw U[0VNۛðV ʪ޿Q߽i@+@& nO} GƦABDsEs7f綾]ڎx{ SJO~u]YaH9oo! aUyfwN t]Cɉ3|ƛc l*]gH%p^[x,uſ|endstream endobj 572 0 obj << /Filter /FlateDecode /Length 3828 >> stream x[KoF< /MwD[eDr}PåꞞ隝ehB;]nR2>)_w>*'ˣwG<<ɣ_r瓓7Gqp%.j˼ԓi=dәy/WӒZZ^r:Й|:^F9}Q_AKK+KS<6˰EG{CRժZ)Q"{X?h_Ƌ㎊,U +ާgOxm ϓQ.gʣOGӓߎ;9%4ՑsL>9P1'#-cwOVG5+$CKj>vUԕx%*MgڕeqLKS_&An a7>[o4́plUmJp"-nN۳zU 8mOTcPwBQlA.FEDA]eŻlUykt~=inpqk 9|N@ޫ +;9Ϧw*Fy;0wPʹqDe>s`lZ(K4=gܒvZK:SbFɌK!̛^^omJ!IQ"jZp;1^2m4.(wy1r.4Ţg !\g ؖY%sǶd$w-0W*zC%eNֶ z:oocYM*>LgJ"wMXUC~X 9t]I^+Mf۴Kx9RYzG7Ŏ%.*\lekJ%X9=T@Bv/=@tIHQ-`e]"xrJ3 g U2UNAJ]M,N>!dnbkB##N{#A#qٜ_6z^ͷ/CbUvBք/`ʂ` `/.^,~dq=r$y}A5-O=\߷X+'Huu_/BwD1!w)r5T` NzAZ1!ʎ+VE_IXͬ!Hqdn|Ji3O!HPS:ܓ}*D)swOe@ Փ}&{]].>mTp"RTZpC漭@" ]FJ"nԁ.3l$2@U: r/`koR5(Ɍmq3z#ZsEaTzp v3>2 ӍQE .}IAj^rk)`@.3ޓZάV0n)qΌ)a/g83\E :˭}Z `L12-65ֈH$P .7o ݧ>gDt./.zЭmǎvv{TQ6c: RH-]u AHJh %-AXwO>G 4$(529=( Ν2U}`AloJPAFU\B"h%v>BYg.u.lTO4_-&JTkFnw$ 4u) g=hqr8F dctx;ݥ0NG$?}+&sJI3"[> P/T)z@ &&f .ސ|bWNOhE]ԥ&D]?-;k{B<6 &`yh|FdDb$aOhܠ]!O ɜ6-[Yۻ9Z7{A$" DA)DQ;FSDJT NCiMA@+ ^(3)5ITb'<PX4V'$ҁu$OMGu,Ybڳwf:Fnt`߇AI;R3t~{;k{'p jH8΋*sGpf/|)9nƊ}tZa3⌹GM8̈́Mܩ=H#eD&t0T"YQ=TDaRq*ԝ8m9Ӭ-qmmr> S`# p>m7`VA{.\L;q#̂&1cѮ;h[d}?84):g妶ObXC߶M7^"gݘymA&y8  >@Z;n3( 'c8{u^k@l`?PLft 4pg׸$<ԼR$){\MD$ni˺ =j8Ts^U|LK>t6O[FT[ifLjCQCH, /]lZZ <5BbpĢaErsr4'Lݤw} o2%yZC]jj(:Rk'$V1bq@Vjb$TKqMg4j)b4䉉G*潣WO._Q̓v1XWW O_A W*`G{zFtq̙zHJfeZ TL_xDq BLfA"T07:4nbS"8C&GEo~ս w "vAz;U \\(}uoNu܀BlJtdbpF-Hk)y{ Zz1;z-|=w]q+Hm˻ !(`i[u@&!q@Q0`rU[v&k4WmSZ'lz . 
T2[{{uDӞj?!|Þ^\a2`bn _AêG %Pm7d([Dٮ[#RʄMMb B#EKMDhԥw +A_ĦT1zoػ)0x[8&格3Dә*~͕" R5M  Ѡ )`}qQ3Ǽ9|y}@(a8%`%A0 rh[Db>=o)4UbDZP4|ݮ6y>mVi#.^@CTL=qC%pY505 7û=o4z4>9sO%s]\WP,K8XnvT:āf.31Zt}x;!NJ=$@Nbb2W^q}Ĵ 3HG2lul*^J7{ (T(Zj WǷ똓bp8*^ϜW\%cwJˆtfUո 5lHI$$9I{xmЦsi:{[BϤs{G䴡VHFo3چI2qPW},kFɖiRhki}#ڻwܝ^rY;endstream endobj 573 0 obj << /Filter /FlateDecode /Length 2518 >> stream xY[~o胐 g@7AMbE-JuYS=gnk;hÙ3˾?zQ:??S!a̟s*X%k;RW0!UKYe-#uUKimbpQvukiYKuO-#ɪʩKDU׊9fjZ^}]/ 4P^/G.[l(y&8V!r}jͩ'cQεE63bvΗ\VF)\!̀>ScWltiEA3՟UH % &.?(Evm߷m$}wz*k5ivmp C *v9>A]kQP,%y}hxlNnjLbm{6`6XW?V!?eL?/u\]ݞ׋@ d Hz+RX1IX Ƌ,%=JGp P^ADA2ȥTdt@cmYX2yfeE*!3 brtI ӻb&5[pQfc‚*AN6l:$|HͬS9~1W%p x-g<@  :҈3m)6%iv}*K6wCC.F@DWz Hgxиz$̯L$·<7*8 & $O(KeGGBrj*N(4v 7Wrq*t}vzj})ꅃhVv8׷e:601YsH"\'7yQ>I "]Dc~$ѼGP.u[-MaXai=Z:&>du.~~cK- w>!iќƓ$E`U[R w@5&G;K ܞ:`PӐ$@MP&"b@J:;%4fj؛,O~ơ ׯbyhwC vth5pw6~۵[V ͦD~?Kq vRfa^")ۄkRh}*'0xzMHN. GpfFB;YLR5TZ^} C7XT*Eߺ(p(uZ24wC}p*RO81aevAt˯<2 5,(qamŷP MƸ08Ɵ<;y N}trŹqI]5K%f꧲[ wߝ0M0۶c6yy)~tg5Qː6Gn7I|JÜ}aK }76vxXw0-55+Xn0Nq>j>PWbfvl6tSv 1eFmܸԭΜK&JA.89,'çPPh&9Dt9)'!X2^4Y*,bu5 +<2P)TKh].bOc_3 J~ e 2)"0xK&h5S[Δp^ ŵeV 5|+)x"VnPLyAPL8z9 Ȃ!7 FP`y` T9Ʌ4/@ RfܾW|?8XL@ L466&MJr㤯j4W{\׿ i~endstream endobj 574 0 obj << /Filter /FlateDecode /Length 2638 >> stream xYKdu`g3R*y(vV[aZ "KD$_y3XʲDtҒM) /:8anu[l_'|hKMOæM+UV|;!Jn),5rl]TCui,ܚ` ZR%*F-[wJѢcSZRnUKC3)UI&˄/.eцtoBx'/0L K~0d9P*rN\+ $mg&@"t&Ti >M+fK8L5QeŵU^v4$lx\6nߴz). 
5ONECܡVnly{=#Cnͦ=6aiS'1d%\r*y~<!:da*C׼G(vI 72<N>zy; rw W%/%/ bW"U(h)Qb` K,[O5 DTPP KsÄ)[)پBP[eleL$*1JlP"aɖ*Ѐ*cT!$)0$.*Jmu fQC]xye=qQjg6_79Z!^ `ɂ Y/ށi݋T2pMYcmEۄ//圐f i!62c 9^&?B@ܸ7Z[DherǦ=ߴO+fBߦNFE51Yl>|+7PHKCwq!̥|*EKx^`ۈE.R;[r {^.KNBL2?#$>3PW1w>+W"lQSr_3j "p9<6xxKƴ4d\!ѧ×p,EL+can"er]'YKqyIk1*7~뺟ibi +@/GGIo{úWN:QYI,ԕ{WI- 捱d]ZT~T5 ڊKϨYy}=iO.Q6iM~46g,m[*nE W&nʟf×xC.nXe90C0M[tq90zbk "x?0{T%y-౐0M7 iXJ{0cǒQOOKޞ8_yT$]^L6@EC"b0LQ03:?R l4 eH wTGk8EL(!p6rgwୣ3iQ|*_7 D\K7 _G.?:q,JEKIu%~I OTa( qg)b4|^&@g+--K#+o rc|{(x5JJAt_bay3rp~<ΫQuŞvHqEhL7ns<5S?܀L,d)":|!iv;NzoW?e'C/AeO)f/ 8oa!<P?Q $$MogYW3oIS^+,|0͍?-endstream endobj 575 0 obj << /Filter /FlateDecode /Length 536 >> stream x]@D|LwJ\6=$aa1b}8j2]pzy.}M?뽝׏>votLN>l|o{=;nn S݇6eKS<wcXcW`k9ԏ">Z`G"~Jxj'O=\$X>ҹb#;NEJM{-δH*H=EeKz`H̀`i7+s&~Fެ̙3fe Y33+C|߄`D07! MFC|߄`D07! j\3]ss3]ss3]ss3]ss3]ss3]ss=hդIG"W*W**YTWbu(UX__%q`o7 XCAk7` y! o5 xo7S8<> stream xyytSe ʮL$(:  ˾mi6ifOnd߻ ])ETgpM};-2;G{lsa@ bxyRy|4akR3s_=$?,@T:v|ܯQQ0jytv,F 2zW-QJSR1O̚5g 阝ʘ3c'gKcc\93B4fzvVΤIc6{ikWmX?4#>sgb|T$ϓdg'J2C潐ya/^}9O$iAarʄE$%NYV.m}"ɽ|=̧)*ij*zZC=KMRQ먇j#(L-PVEj&zzzZLͦPOPK92Ij9+j5ZI=E" @`ة8.jMQITX}؎Q;>=;0Ew][~Ghjtݘ;~1Nϯuo_ |pH-}bǤ''w='F1E1N^=yה_M1L}{6-zCb1:hPПT# ht%x9hK 8@R fQ8aIbI,e6: \Tt6+y[Sh/(hPSo1p'B 7 +h (z<҄#3M_[hȯN .N["W ս=hߌ^4{ryc:uEVY,-Y fZla7SN͑Nkits*Lev]cO>/9mV8V'ACoH=| =eo<%EJ+hgUZ3KlGQ7s.lkjS|4@43 &)@K]`d,>+qjݲm_AWMe_|yVmzh֩]h_wngtg-% ŜGhţVy9]َwqHp2O6 Ќ?P/*?Mb(=6xQR!-R퉸]Ƀ,)U;K*IrNc.b5z zL BPCd x| e54=!0y"KҒ^ZGfOccSUS tw]i.e_: #Cj9D.[ L,"x0r?活-񙠐xFxb={vfq"E)EGY<f: txI I}0P&E(&P&Te ͏x诪Y{!J$"'Dz.r7ucmhwd#75ATsht_e[SuL"=oB%.\E7I:[ b|+ϵv7;I>Ӂ"9v͠I._$ " p,EhG-•œAj_?tL p,` Cز $^[1 J~{Z˿ Hg5pL:2l=rֿ$J՜Bl:AlUFB-%i!+\LtWaMdUl􂕰~;+~ڊ!G4)k1_Buxa%hCP{AaEIdNK( 5H 4eoS \ 7"ZtBV͓嗺 0$:wow?x#Sy3W6\mAbtј^|;~ז ߹/7S^K6L@S/m zDY15D9-<2s]ğ~Bu!Ӣ%M$ߘp͵0ZePCL6~clCyDz3piXޅoCiq[}=-S tP{ǝ(D)ᙜeWV/}>ߑv1y٤H~gq6[J^{!n>&iHǣּr;q(h@촕Tx 0 U/Z&oA<\H @tTL40?DWoI=I1>x[Nd30V<F#$8m_gi4Cn& 'Az(t)zӨVt¤,#y&hUwC@:Um*̅A Uu_]&]Qfr/ng=u-x:>;@Wwd-I1lX0,ջ»O4{U \v0>z y?:.#8܇4$u!o?cgVF&s>BE+qR#v֠ 
-bH<ֻ*{ƒ= %;REiW</ 9;34$1 MFx,# /`@ ӇL_ n>Q~=wt(rYFxtmQc,&}YYUĬ{i)H6kt<~GWcx 6ar-l8yfo{eSSI5aLUG oʂ~?DԲ5ǩor _ n@ Z~" )r=d̼XjWWE9_vSZSLnRRRΫT77u"%x*嶳ՇNۼՆZ}* "Eċ%-T=fj;Zqʂbb>7\URgc(#2]RDn4A3\*2?#'I4-4F ;"/{( Y?鋱,R& uCwXч#1j lv][GN#X\Zd&9,>A&&rBjAsiO0z6,8tU -hk-? خK-3PFH2¿|/vfޗCچIEÍYR7&U_䴢i7‚O[vQSB9=1p<|/mrUGfm\C_'ӵʠBꁼ}wSŴEm>tI|PZ8mN;w+wZk֦W{1`O:Oi*UP?9!ؿ:~Ĕ-$No-$7e Z82%,r9 ,j*x )fRu1gW_>spڬiXlIOlپ_* MuN~2agW=+g#uF<7_!)Pf(oQ>ҌF46ŲgL&B3OŻhl8\/@pTy b2Z䦒]ը z z h~5Qsp!.Ϡ۝y%R:FA#*7R]"٢F uA (ZLF㷹*ܤl7T@6YoVV2kb@b$LTc?>6C $~z喂]f9 ,'1Iir}7A6A|9,cf*H{E07ӾCұӟ/_ 7iW:+ YI<ohhjk;{hQp8o-dZ0ڭ`[qDPZĤwC"F[8r_͸|Jw};bX 0sZ¿n[ݝp+'*x뽨~w :qX'⻞|\h'b3j9W0VJ|ѽhF Z>tHnGI?EdHj⋙VVgs͐)&)?b;q%k]_YT fZ?˲8a@H|רR%RɁ}őСZ !#I Q؋ |8i:(tr!T %{]V?7Æ.cyU}hAz~sdڳrwra6l]^$7oy=t+nys L6dQ&1`oۼL2:Bh7XX5?4AoABAoD9hbڀP"7@YMn/p7FhRIQ{dD]3*䒊|?4 {FH "8-@7FE.D~)+ ! b&v;n{y=%Lǣ7*BIЇZM]ACN"E t],!pJ@쪪qXf]7 z='i$:UCWЕ"jDY<w`f CH<$a党i&y}>Wࠥ)}O1:J}mw d_?t%\+kԆZ@[۩K(H'][X`)BHINuyݗ2H0e v}yi khi9L⚔$oVE 9:NG Іp.ML+Ω>?m<֒RWHW|q/ʩPUA㠎۴)5j?jHb68N f*l4БkB{EyuYYyyYYuyuu̘ߨ Y"gYI$D.C;T7ϹB"G!GdF _= QVn5N&pGa endstream endobj 577 0 obj << /Filter /FlateDecode /Length 231 >> stream x]=n!F{N X%kq(},"pFz  ߰?|jYӗxURSrRu:>LfWh6`wa7CqIxm!bjg r5-ѿ$1V[H33H2$ ZVcA Y'H'i"qqW=䪞x*}J_\S-SP0vendstream endobj 578 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1604 >> stream x]TmPSW! ܍Ak ,۵XmRt !QL &!!BG8bWLQdmSݩZ]ձ}/{i{̙ygy9gP"(tSbvOW#wgg"!<@xI 1<}(dbIb9C0EJ,m֮+Vˣc##;koe(IVTb2IksK{;wTJJ;]%?nٜ,m=EɒScePT,[*S, **8jDɨ|$=X0,%HIt+4w3q*{^9.S{5h4ؤka ,B|5p=aoeioVU'lf^54.rbkW/]e;#\:^U; a=lwT SkE;-_TVk 9i?8f`s,g? 
H@xM.wcb p?ybQ;znts>f>p>&q*&m1]\ΩSlt<(lVs!*kA*e9K[I=aүcBl`c;Zsg`,e#`0f iOyD &\?h>T rif-{G};& A!,`:r`]SS(LE;>8 ׊[Ra5g~j^AeBZamㅋ.}sWځ`4.v1?KUֲN߇~sᧉ3✇al+h%u4.Mos'Gk$VF/x4j.n2go^^OHOdN [[f[|N:i(NUSS'Sa싔P Ƣ[WK03rc{-m}N8 +a՛{LF~OΖtR o g/Q$ UZa5wّս:w>m'Ho<[N|L/ߢn2a2x3tܩ~㞡 cw>huw؎䀙.n?}G;>!<%KH;؝f\liW@:doWeBhJQjI$IWh_]?ANĠѷ9h-82d^/$ LӷVVBS(op`)W 4p&­GΣvBNz?ԤN66'>> stream x]MN09En lXF3\ q(4 nOUuq}szxz|Zk}_reqsUӒ?1[uzxׯԘP_rS{,ʗ|lC.4<ϩ*boStH?vK  a8%*&i7 x*FO 'bIIF52@gо%)@ sRlqRL``TȒJF(U2d I]R> stream xXtgl""dBB eR:l rHwԻl *z@ Hl*6=[ˆdf9c Ys{~.gXNZbEx┼촗fgd楔DgzAn"ӕ~paGzhn,*n̫PhEE%Yű=;s&Ԋ؅brcS cZ1+vea~1;‚Ԍ)y2bׯ}%~mUW1RS└)3cAAM_)K_,+_^bkO3w|g*ge#9i5xtZ:z88399989q%9p,\YcDw >'.N xxOd^ `2r<Х0j\1x*<^FcȯT18~}wѮ.ƲheZƓql .4f~ VޝkqQYP|֡U7/$_EXx\&(AGl5;1Z n wAMU lE \MYc2zPOe|3BySoO%YJZLfp{ց ,>r#8.jzb D^̑ qO|2`#:VKj oԾ ؑ dX9V ` z=o|x ;Y ^I),>ģgNx?p|S2C9((>֊5!5 sW%˯.^W߱{hrE*Ȩak4FMeujZ}heHQ{|⎬~G{Yx}$ H?tXh́Zz8ܜ{=gg؇0]Ŀ5@]M N"5PJ*>R㏼ꃣ{ښ۩,vz0-7 x# Q'x7v#y +7ܪESm=Ļ5֤@mT[jCۯC&>2.UIkq)KPGޕ"R[H&ƳkX(Xj0RO/zxgØ#㡘g%(ϸjd- c y`lc UgŨAoԙ!؉}' p̑%~,x$m^0ߓdKr,[o$?D -=Mm]Z@ɯئe(oiߍwZ,Zoil墇E_DpkF0ӑJw^杯wRHAR%DW^xki vrS B>֞VF(.),m6IAK%{K9TźoeA {ؽ*9.;X7=jNi2TiɲMW!QCA⣮}ÿC,CzTsv<:2[[$?+ߝ-*V+QopՑ?CSg=yW(7FoOBָMTܽ[04RHK.b˓͕=Un8},n P7J,V۩)mExͫڬduҁ8 =3Cs.ه1DUyFdI2vHAJ5@XDJ([^(D6~*+JbcG]W}*F Oױ}(. QaqԩuiȆ2![^2Jm@KiLGThT{F/!@1,be*lYh7AV r;B7†~ 9; 7.x A3ɶ*`}Wg&e䤋,w%;02;P W>Į`aVb&fJIA?}6TK [;ѻ|P|,랙%Y[ib Ț.Ϟ @w \]fm Zx6F$hPx(2{FU3Π Z 3nk 3o}z/:[` bS4Yz 6g! 
8EÙE6^%h *uJe$ RFRVVTzNcORrsҾ?1 4=jUR9t:yaPmkw[7/45yPF4k|cs^oZNŦdc0{g~`FȾ;" yg M&s>6g_v]5P0UD샵}u^YKe1v&lxλ3E( cwg2pk73~l@cY  bkF f3As2.ࢗwzJ[Nr.YbdAF ,pPSkz'd'Kuv5 n~A|n1C=3Vk G#}"UOc߆DׇK,Eϱ2VJ,aGyH*Q.RNMc[ /`H5`<Mzܺ8@|,qɋ"y87~9是×%6&f/%b0oyevh@P> q+-+4>.vܓg̯X<q=jڧӕjE_q;lG0,3dEOfFx!Ө^ B6+hkt-i(z>Ϭ]WU1S^‹}t^ 2E{7 o ~a:ޒICU0' ŽJ ~m-]n ѧZ'aCkAP.;YmP:k؀&DQ>@,KY Yw5E1ľiVts}?7|[ůlI̎WbaF H[i\I &m 2 }5DY۱s uhlWWXdAoS[hT-@xZSeBҠ;9`[ yĿХht^Z-..VK@ϔV<755 Lakit:7] MNx"]Z<߭/Td|բ1^(wf]M~Oss9:|U`UOQk~(Ǚ䍫;KćíX p^Zm%-CZ8FTʋs xr#Yޚl moUvЦmӾ-瞟5u);zg:{#ɸ{An2d*"X`t8MpC?F!?lq՗HӸ(lPno>OWa Iů%Qo1^[&8z뻺S'Nz-$Ͽ tz6ٸ5>\_j ?"`?~KȐGZCQe àS`c?0u8Ue.gJBtC0"F-7b$VvfosGG6VX1+h\M4 ZY<% q(b9<1T0R9]IkI]p )~ۊcG5t{PMFw5s"?Q})%Woo_ZLjn,[`t"$L%lQ*&7f[PWs D-"XL1U|gw#)U1_G^$8o`3Z _E6X~x*)&A kZJQF^{.YPGo9EOd4ouDB+'~;v'Cot4jDD>[;?BQoqbVQl$vl tu7DQg^J#IiJQ ;?$pb"GWͧq~46USFD̎So/u~}-CǂKmLn]CR8ٸNAGoe`f}M$g $hy ݊L²KL>Vmu8Uw(e*ӆ{;y^d) B5 q cg L* EAAݺYY8_{jױ]G'v"tKZNzNb $7,wq( ]..vWY+1IiRY[j}V i|6,<[mfҧr+2bsUT2 EBb!J 2SO(0Kync EZQheE@W шD9F(:m=*/dt66EV|B[!f8.:۟Eʥ@_diܭU[n+؟#"OR^xPQuThr԰u1#fev>sLLbp8 endstream endobj 581 0 obj << /Filter /FlateDecode /Length 234 >> stream x]Mn! FFlM`D, 2YxH|?Բᣯ 7KM/G JUѩashjؽ}kݏç3NP\^ZC=WXӿ%K3Oj@HqR#cRfd5^0^Y+G:y 1y yr^ M Q~|>z&w)yqLomm< HvIendstream endobj 582 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1472 >> stream xm LwްWmd@5> T5j m][^R误+P [q8>Ѩ3sF]6p!g4mQvšY䟻IBB$9pج٬0̜T*P֘H2%RALXp.]X<]Bi$Z~'Nm7եeFŔ쩓&YfE~brKd(W+ڭEYKLViUeʊDBFbł+jUJZ5T( eJQW5^3eRU4*Q-QkFΤUAⷦl+FM!rfhC#H h#d qxH֐'SMOuP  zI/Hh/{TqiVST=7u~v_\}X1mU`8 |<$erYEPE Ob\_MDsG B[߲,3@}UP-`3WqN'݋ZX( W5VdztNFf5EHP#/ '=!9HG*K#o533zA'Ƌ7hw8¦֥>!m;x[`ĵs< @ %rvSͳJ[7yendstream endobj 583 0 obj << /Filter /FlateDecode /Length 335 >> stream x]n0D|Nh/%VU0TCCSU=xtƭ^9Ʃ_m)]n~LqM׸9._K.Q_5Wo~Cim)q9 ym5D7) l eLݩPb'ZZcBkF(`hj lP@E$Le5Bb/iiժC^N3;:;-vY($G|+xGc<㹋Nxvk7| UWzx.}P$龮y8?qʿ#̋* endstream endobj 584 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2803 >> stream xV Pa (Q,߸k0+"lð2pV@q@Dh4ƘDc$&Ac%??U^Tys]ajBD"%^ёQNB1(Nď0Gs}}%`! 
ӎB H"tSjvFULXHh| _gzQ( 0"Z& _((Z)䣣B`OJ2^^<8fa"#57 18L9iS?&L=t8 Fn*b>p'>"CIH aK؉D@,NjœAX#rS!B519&0%Mզ%n/IG@ݥߦ?7d̬ڬ|?FLqxAF&PХaL=0@K#o z؟Z^o];b.YQj֨aCZ2b|e^,NWވ 4 B#nz4N}rT"!ccvے~2T$bLvŐSԊ"GX$%c'QKQC^ժCdTHC6VAZ.ɀ8n/]cx-ojiL:<6QC7\U ɷfm=ȹȰ ;a/x.6Gl8^SkZv$6Pr#6A$,צ W%䌴-3mf>\ Z(U4Sj0IZ[xwy@R2j pOc#sYeAWx޳)R&C6lޜ1f4B5 ͵MJOhE:2sSN]Z 8-[C5+d }H6C4ѣ2*csTiCǕ+˪jDҮH6$zn/ӻ1ze sb9F^ܞ-hTфk}yDZWٸ{}U{H,$nFEC_(w_ubQYojH J9d:wB^щ1]-C\icļ-3p+v脯k|MC 0_8+r lO꾺 EhЗݪKhY`pA)Bӆ"('MI?MՀ6u_< "j˨M "ФtU4udz0 %2kmFJVBld'4]Nױ%?ѻ\Ӱ/v6ӝeE` wmjȅX>2ѻszr#;bHƒkTߨ;ЍI'Ǎrp %t~ڻKz")=w馵H0ML^f^lndeA. yyb%WRHq-!4!zʷ/QXp JF$6]8?l3`'b'E[w=t!/;ϭ(ҋ:;(Rد>ԖX\Mߙxj{g9kؒ}Ҩ)0~gHrvz* xu9>{ +fR,Sf9p;5'H$|d͞b[JckbcjbkjY|/1QfC6N>F{>tW ?c6A{l ьrV4R -TMax "D4& ՞ W;*apJd')4~Oh/+UO/j8FZ!)r> stream x]n0 .O%M+9~) rRQ> stream x}U pSeiKeN֛(.)," X"iMkw$7洛RDZw.+ꊎ;̨?iutٝ{ι}\*.hVThZlbh23kjmc[X)JEmȒ$PVbEF&Oo&:5;pk/ŗK -F]VYLN [:տ^ٺ`6Щ5m3jf2QfЫhkԆ5KWQ/[p#k?Ĭ6jZuIcަ׵k&MCgj[ubnu+h0"m4j5ĮF$hh1 kiLf˨٦۪ijҚL:}Nk(W X()$=RrEMfH@Aj0Q:F%'lPI^IzWќޢ%#-aK%_J}2J(3JjU40+ܑg,w8p%]X 'Pt-~5Y^W\+k+8\:>m{;>CWThTN z0;[FVт˗,&և70smT3+ .]*?RL͙悏᩽= Gb7tpw0W^ox2UX]єhұH0n/e|{%z-Gi9InbZNakoP{;b$"T׎PT0[Z6\?.A|4כLzixxpp-}] VGy3X%4t=KBeЯd4O^o[?" ݵk;y0I7ا^2cNgqߕvd t7>L*IUEw@483 ix:W)Ovtrl%6syh7[At^B7w+4q>ܝ !+XH,+)H~|VfBYf谲7ʬ}v dQpY=#1a׳0 8ޑy7Iy~X]bʿ9\18+;q:1V7v~ryāKup?LYm&cggi) pxG>}bEϡwP&<Thb\w\P|*^kFq;X(Ws/eoҹrHx eap/Q8߃C.ŁM鈻|"IVCpKa]o>k\N:^%<^P ^׻Ϫ/0gS< y2V u_}v~pz翃RH7;$^noY;eƇ<I6Ed?m|7Wx^EaŵԍECϹݒ. 
ܠbGHH7S8\s0x=u=_IPBu#+d0!U=@A|hqӸ8'=6II GI#] a7@7HxvR5tzHǔ;Y!7qȀ4[ϖJ,7áDroY@8T2 DQZendstream endobj 587 0 obj << /Filter /FlateDecode /Length 254 >> stream x]1n0 EwB7d \%Cd 4DgKRI3E?OvbʡmWjKʝTH~<Օ?SPYkyeڒފX]`w1B9mqtL\pRB\:@SdA+T@*@:@SǺ@jX#t`EH-e׶ +-ges9\Ub.k8eRD ָendstream endobj 588 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2529 >> stream xVytSU~iZXDA[h{(@6!K-MtK^4hj[X8<.P(gd\KoB~}n1^ Bѵiˍg 3h5:l1$^ ?>G}Wx_SF!FIL*SXFf?g0댄|T4A(mDD j@P:>F)YxzͣKԑY՚Ĕ&9ZzWS2igdudLFNUlQG- S]!l_-R5A q:MT6&ǥ4z hC&Ў8F'0 ZMl5k>06pW>ncG=cc{[1{[ 12O8 'F!4&bF¤x cd汩Ome䔇 ~e ^^_bt X UJTQ EuhGv5QjF-h/jEԆ@ eL.OJA.]\-_#?(\~Ʀ =p۸I6O*oMX΀ e3Lz}Sw%lņ؞J3+,2[ŬxaR KJl6C9WXQ^3V #x ˞1Ghޟ˜s,e8,P9Y^$ f> TAЪ$v](lzA 4+gԞ:whn/‹KeCU3Mʾ ϣ۰-I嶲Im2 \z)bοm8o~@'/J\I?-Vm=~n˲q`l$?Y#.z5y ~C1`Jzh.ӡ?~ƲlB-tz7S}~2>Do;R &eI#›rRZX1*k]Ω~һr(|Wc}93;d2I\P^" wCa>{P_yT >y>)RdKN( 1"C]^J"ltC @_*U2$ KN9K_qdAD{B(ȌWb_3RhD2GN d6 -hR}8 kG^Q I䝓R $z <G~#f3s) gh <|癯+7gޅ{ӏ1.=<)bն{__fUy_&*> stream x\wqvriSzbMZ~O;?Wl0U=la(;7g\aq~qY/9do\>vo;\wA ׭7;=GѢfߧחo+IK=_"csZ)ˬmz+!\wq\d)5;UWe.zq? 
q6+nUoXIK8mF?znPNR7HkiBL\P˯Z玻m33a M^6.[r LWO|Q1cbr\/+&,,v Ƽkz9諾ʞiu Z˟8T$|4=0]JvPq 78߼OP̟@@,HKM>6P}NZSx=&Ιnlok\tZ 9$Gw(K:|=t(DL2XguGiBiWAmHhAOҠ; iB4;ƤziG:h08S=x3u,PcJH@sxd?"iO [i!Fh23_b15;b\Q}PZ먥S]#rM`hC% W#s6&[70yjxYU:bq@AAj<0/}S̃`m ߑv bE {KP7m&QP7)l9ikϫPo.!8?X4Ub*z*k`f{KGWrO s.B>d{f䴈'x0G}v;Vo[Q zoUXB8lL9}n )ZC#d~ڡ[)nga_XΓD9o.^.I~Wi؆1׭xC:B({+ 1$L,[m&WX:qbؒҊO7m,3i;qӧ648~({P`+mgs>e;zݭX#k@&noHn.`!?8#Yc;شvbh $ھN"ԅkezèqPhkqMa~ ׷=ǔv|`u$\'el+Xc ?K]\EɆB*P[ؑ+?-z1!ZС=BoQnm԰CrtAD DTe5eUμMYvyO47e,d - -k;)s},`k D~xL #SoD mG..y.2MJҀhEJ@Gejp-VhpO*?X=nSHǩBٰaY,[1s6ML\/$dO(4茊_ hShdm@RZuıةyIRduq;=-e)ËA?]إS=y+v`m]CRyYAl-¿_k=yL͉e1f'Th',v*38!@л 5c2jg"^WJNc1~Z8&6ñ\O#?Y`|fy9~ș{ Չ,x{S?ZRv$ˠ!k}+Ľ S/]GjA$^* :uT=B6țY!AG(V,-j!Jy@Iil;iZf :< t[N֎s 0fRGz""yzP5U ?KkDExwL,HFe݋c ly<\6\}ՀI `iiFj!s)<$Z*+yc[6eO*63ЫWMئ&SqHţtt@"I;ѷAgg n$ı< ;ѩpLyyw;;c$+ˌjD([IC[ktdev[a&jVFc/?5DaH{jFE::j U$hs \tcagSH`цW#r{5kko'%H GD +걞[:n$)V@WsƧX0?t=iSXF54}JIB)*)2+BEJlG"]B#3:vIV j=5LI5 ]KL vP+YS~13~pYno@*JJIH >uW jԇ%IJĚV 0lMŐ=ӹ!zbH9ޛl6DR=8O o}2bpG&(^ 4`cXnqDg&zVg.  }Actݻu(GqH2h|6T2%5{ل8R0u't2dGItæxJ6;d4U?;H񙡘uyMmyIYxrbKRxB)`y ̦j61@r,[q&nJB z'V `c40,o/ZƊ*‡7V"kMc*cQmA&-}svZF_3%J^b,NJS@-?fR&e'I>~2yV zen{HyMۍgIЯ70{?U XT?:8eug?,_6xi#GV&cSS_O5O 6Kv{˓(kaι<} gvDŽVhi4fvБZ;hY ICH)E\pSGOٙ=bZm[ y&-bǿ*rUsW7qWIjqu [w9.$>5jf #{f 1 Kݔ#?w*|SA=;%{\+".\V1՘]}|)GtP:Y#q< :qpJ=U@Dg\%zIL"k YqJ2j vseo7)HmqFJ׫*}6XZ^q,܂O:z/TǢ MI㱨i^^+_f걱SU+FI ьY?6]?nd5|r Z +l* BAcAβw~6Bk'Oz qbZzέl8x]R8f~4D{1HdSdHOM۳Xϔ [z.tRmAyC}i| 4,^(]+f.cQzQKp7 ̀0iMƒ yZyij!X _x~Iim긂vK0yrr&]Y07Ț@,?pi[)It%K݌)~U{fٴk;j;fWcezIˎZXiD^{⡾)0?~<8ixO'3'ܔ=_9:@:G78Ek1 [|գ^8{GUUzG]Agj`tLʟ> stream xz|D2. 
һ45DZ0@ Ԅmn6{6[;)pA**W(WPߛ;!$zo;=9yΙ8QPNNN#W^>tgx3 -:_pƉ c:Ё'@G۟CߎL =kӧGJ>cʝC"1!AwrDHѓ"G ?:ߦ/]~un\?yƿA6-wv%QK,]/N®UW'YguA냽B6n ,}yc]0nEޘnSN} W(j .5P㨵Bj<@&R^$j5HMM;5LMMPKVj) I-^VP(j6zzCRש5}u<̝kG|r]~X:rsJUSIW7׼G< U/ş_=`tcƌiˌqo4< ~xrI'OɟN7%j*7ȴӤMi7LO^7jy?Rf]g^(.XF>R /tɐ*8Ă*+M`~K&)ph.mRPJaP*\S?61GnhnC^Ql_(66E2~R{@6%0٨/N'C4Y^ô7({ͦ Y- w4#RnOmuۦw8"OPNb>-?zJS7xZQS@10u.(aZG"YpىvxLf݊|Dh^ VH_ekdeއEEgS(T\F4)g>?T fy8ebX [n=(u82ec6?GO!)DັtJi<9 h*dl&xuY BC"hlB-肨`Z_mμ-Zb@ Bex1`&,MAn_.Im0W8}& ARj2!+):~g&b02 ӉC#1Sg7qbEWx^Eڔ_k Ç2l{Bv`B?CR E/݆v*q $]#nf;3r7vZݵ;=ev "+J:hxDE# 7x7Զ7}s6*+Iԝ26.:1i*|I0_/IcSU;g6ؐv*ʋ:pF0CYfsP#mE[]:m(Ѷ[Rllt~HCu6X4M ZLi ibxPv aP!p6V -*R$'^!!yڼl:>@u&I3=)C 8"4o8YO~7<$;b:j&zuk54bIS5hLl<*׾1R@le9620*U>ǡ&ŮO a U3zg؜8I ]ҽƭ^IfD[)h(Ճ4&IW z:9;I`ƃl9@?T^O*/J͓~yTC/ kQ Y_8tK-"S&'X[ F(Лms5}$ |YR" 'Bi"D :IW7G<✉x8ö㧥_N@bWoh܆ƈq]{٫p Gg{2@y1t(܊ۊGՀVA]9,7W-n!"d]ټyWʎT]hɿEa2lKsAn-ɶ64(v_ǟ[>XHnM'TWDFOTKhv)tQ'4;>hD;H=mP5nVYdoEcI6LD3x ;}h(7 -(cztxL?g2xX>#'m+ņ⋨uDi\=Cm/cA Ė) D帕Qi=j>y5@A;]ʻ@FRjX`w>0{Q\6bs$WLJ{"bVC`Vefdt@0=aXc% '( isD*)ӿ=k{jV_E#e]ӳ~!`PlY62:.[#0 ҒxWV Io44C~VN6t>-n=~S.{sO*K-ڊ@[אx/1=[zJcmDcSȸɛ+oNcO<{;+Zznp8jP593#q힖Jj ]"VA=t qNIQC mHNHힱI[K TZR<+@~_Gmḟ ^*|71;[f|GU"Pʙdg{l Ɇ!ovŭkӖhCx ֭t^8UYePԎ"Rno4R"|FRHˉa#5p54YD~ѨBkH. aFcw?zе +`Лo醢Nr9%NBCF+En'GVCO_'IVGHC&uU,(Hʅx[j3hߔpQO3_f =JJW6Ab&ܐ9>8Qrlc[Vy@*B4 na|G #<"O'Ń \&U _?It1x<+:!|xo!Z756ա9pps㑚c@憚}m` Zl",%2;KY* Ф%ILRuXZDi"Ns4|_1V=_ߓN3|H #M=w\Űdm|:"3D]D|trߒ~eЧ'(' sPl}R,A`}$n8p܇oᧁHesU*%#ΒlSf˗zuFgY 5qi-g'1Tg"“e$)fy~zAREDBhrtRlJp&1+9?j4RUpI㐑4r}9b5ͮ,Eъo4q4Z"GPhrZ'.i͏kF87ҮTB8Mbfl3I/lii~<܁wJN܄&9G_nl! 
ټK" 9wcE4"OYFALJ{'!"oJ޵; JRIk}U%g < >q#A/IQ6uoJ;nUU -m'>/Њł!x<1)N 8Mrgj}Q_;%82ȒHNNv_PU/#_&uӻ B;3Cp*ijI& \3*8KZ9]bc#D# sˊ[]nؔF $5Ş[_XZeݙsw޾-4”EuoE1EnSn+;-u'hkr S n o&˗~95w,Vۥo>YnT/Q'>Pl9 *Ǿ\V דJA; ο 5߾LQ>@IiO͓a 1ԨSpP̴ISu Fɤ/r?ODdCisơ-h Su`\p=͗1%-LG_E$ehixЇ1:}ЏnAOYR@ pMZ ƍGߤHکUmF3xѳ}p@TM`ߋ~t$&XXrPh@#@qN{TmAX:T_ M_gU|zK{id1LSkZ& jx9Eo$7#/lͰ3uq:p^ȿ`=+ d?OM v.+WL#弟~U]>[Qd*J9؂<8ty$d6Yws骔 MBVInV؏+Pdkͤ(WS$i؄y'/H^ӕ%lL$?j`X3>YB6'ybƎyh1Sj{G$n:+nj-D-jF vD_Vk4k)Ssj+II_U0+oWt u ;N -{{Nx_sՉ?ʻv!-#EHd?$/[E[!L.R{t!GN_$]Ҹ("cK**MBۯmWdMb_4&NP1 .P)rIǥ5D'C|Zf&!/âD#M)S[VLdAWd) I;nE*Cr>e (ydE/ߖtZB1xHӫuZ0rk%kM$ƕ!DuceG CTbV5hvJs+&_Sf&+Y)ڌli 0 2;'pێ>$N韓:A\h -}wny` !A!5l\$`~H-z~9Ԁ[>ڏGax#,AIDqbvنu;\Av̗gr= kSl8Q/Gb}ܜ+YS_ r se:a7yۜ6cclj `U=&_"A%5)*_ГÃ=S2F*fz:UQipB" Eg66X>ή *:9rIX\x4X#hcEhZ?]ѭ 1w8u! أWK Wגۂ$݅YHưSHE~IEE4#攖S!%ǻw?g> stream x[KsN߰b^rnɦd׫`Q$e+>EVR>X{yWg3fW^}떔zq3We!م*/2G3͵4ӥκ~ !w:0c f 0SecLRW,/f˳͙u?iL)V뜛ՙLOg?|pB:9 *{dSRd;KO@"Y nW+0mcA STD7Hn\"%eCi;ت)p.pCF EolR$Cha+lxHZ*X3ƵPFZ{TZ2ClF"Et37DTjY/{[ʭy ~%$f_* !We+hNnCqr*uTfb-^Ԭ-RJw&o6&*$> ]D(S#r+ػ aD7j!Nt@ͪpOPg] >@G+vD܀'e b,&qYq#lRػ#,v˺?"IPv~幟`1R0 aJ"O8PB1Ų wn.p'G .!1B/iD?-hdbmD) 8lqN=4AL.Sr-cd2Slvzy{NBV< ?<%e { [[KJrzJ}H2=ˡ<*eu.9ݬK.| itֱ9iwsn6A =^Z )dkH!i `df96g?F͹98}DE B?3\[!}0*{4IW@Ǩqfc= t~WYS͘B- ;6na$m@ xǦޯ$mP\ OMpqߠ[ȴ`;2x-D\kz;O%U_p*~ZSa92yG* ]HQR>'C-"7"_+Ò4HZ jCڀS!FLGhad+ӬΎgRؒ ܙ펞@Oxc4 *l~aLG#!kI;oKa2X!N't&PIpHpX'Q=D- qyv΃em|KVT.Oef3"Ya\X \$VzMxQ='&a0MX\E5۸F4ϻK@`ɝzjfe`7mvZ AMYeւ26G9n;:Rû"A-1{SY5Xޚn9[;S6|3)q:'skÄ3mk7TccZGN40LM +[6 0~2O^D!<(b-v-T9ASțԉn Fu* {/ʄx^zI9b^ɄԵV~߁@l{x=Nt}l8W5Uy#}ToFShZj8>A$|v4ncNu(K)U )GUYWӮM8! Z-Zi~ n|&zO˛U0K?$p;.Ip@\+a_(Bn=vݍLxaסe44ͨ-8P2 Ue.SMurY^\ID;9υdScIKŧdxKRǪR)C,ip&żJBmo&yP}V;%XO( g\깘 K) P8LeTr{1 B u). 
KIz^Q5kA[s0w56Zo[;@PJ]{ g6 $W6N;#nWSd<ƒmiX^Pf0m1Y{ JE34ڊ>Vmr\/B?^B"zmG[+?=30D溿 {ϗ}?}hH 19ۃQ3lD[o|&)&ݓ -ɽZϮd3x}˔Gƃ~{Up2Ҫv_%I̓fRC03iESSH^dYK=DLxlo\In{OT(Ԑ`+/z+c)F S$2]mlGxtcq&:;}7[bбCCtƩ'pmQYO3Š DS|R~=XuǽuS T +2}mF_]z]U)7N H"qDDхHhĂ/9^v2IEF1ILrݿwϘ۸JDd+ d>+ Aل ]jqۍcPgfLej;.+0xiQkc3ʆrIP6gG+yZICÉ{EqXD_іZ{ttm CSmJVCx;nn)\u,)OZ^utX.ցf/M)'"t4z{'܎$9o)G*$,Si-xgƢ. u c%1wW[lg&":>GGh mdA8?El|>%=-Vít+,+ HR4 ϓtgazܦcCQ 51[*㐒v-Rϣi)jV(xukYz RصÀ8.ٞqz9\G+Uendstream endobj 592 0 obj << /Filter /FlateDecode /Length 162 >> stream x]O10 jBVUAp$@w}wYvg@>%M/ 0Sat@vz"> stream xcd`ab`ddd v541U~H3a!3#k7s7ˤ9B``fdnls/,L(Q105H : F Eɉy %%@NBp~rfjI%XMFII~yy^bn^~Q̒ ԢT; kiNjc1' ,?:~uz{{ຏ똿/~Nt=w5SLT׽Ys'NjOuxuȳ~;7ws>VY]Y5{ -:#Z+*gwϕ lҜ{俇~_,|C߁ݻ[do.[+7BwZkN>ؚhNo#2ωN۽{ Ǐ^˚r|gc.3`r%E'LսwV Yphqd`]31endstream endobj 594 0 obj << /Filter /FlateDecode /Length 433 >> stream x]1n0D{B70m`4){Y)r'[l1?2_v~k;˸Տ6\qޯ_|u_{ݽF:mpu뗷ڜR*i*M]ێa,NJ\S/NJ\Ya=zTکfUQP+}qXU<`Cv>d-kUJS&WV?! !YFB<~6BhQx4 FcQ(m@6@ hM6 ګzV' \`W+ rvrd xao5 yCA0o7 €! \0 rխz ?7>.W=yzYeUh޲endstream endobj 595 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 5378 >> stream xY xSe>!4V9UVG}+N. 钴}NfД.tP m(0"谣\g;O<;O[:3 9xcWd̞3}iVLZJ\Y~DŽCȞg5CWǢ >1IP,ɕ$%gE̙5 fD,K2SS"b2#X9#bX_L"ΈMHIK'FD%DG_n予őׯY7uƃǺ3AX ^(Y]{d/͉Ǯȍ[*au⚤ȔuRק?3 'V/kZbIL&/Qb=1@L#6 MBb:XD 3Ybb6XJD,#^&s*|4GƆףav<>񞱒wf?Cp/psE,*P\[ 4Mlj^U47\E[!al''x,6C>{ tPd6(ud: 5_gK}|>"g{s֤(cWځmU9;剅+HMa*?Y}:( Pf0EU>wU: #)o.4o3ԐCnٟ~N#jp[ӃUċ:C6ƍp?EahzM ag&(Z dLʁGjшֳ @mkr~r˄2nZjk'%sY{#޵'}VW1. 4Hmsߘg3ACn >v*ĹZICIg Zt>)H͜+d4bPO*54M*ГJJբw7"򨁍᎜&L,܄ 0ߠp x «TÌ72f {;7&R;%2UcCe֒. *}PSo烔̩ДW՗X`{6S = q[;,FC n n (h > C9!_θ ĨM`]=k)C@VH_uݾ6}Ԙk̢M`}m c'zrxOBbGbOrBYWsANJ2@*^_eU󑸶\h򤝉9.URA)|!܂=BM!jՇɟÈ~"rN u~@ʄNEl]Tķ=hJ1uωv~Bɉ&H!^ !>)w5۝ t}=ȣxl[Z-p> Cc.R~5}%\>dKqw0L5j<>>u֘tyl_D2`*/r5RLT NiI/$&orT׾W\ ^#mE: Zf u_dG)UYK)xrc-ގF?/W| )ɯS?] g@25m\ 7:pGw|)gC @LNDAo~s.F{4xfywJC>Yv8e[l9dji]u7qL::z02̏ AJ+#ܝ 33A'yKM"ʜvm[A~73b4.[9Ph>"K@C>)[qZm4yCDթr/*ⱯSg(Fq2GĐvIijEB#(Akj? 
US:B-%ٚ_ {o* cgqmnih*ev;-?__x;!5xH8zl8P 5Ҳ\l> E_Soq_i>k)g2vWc6Pk8uAԿa)CłKwO0MAC%H6r< UB:q#¹el t5Av֢mELp<8*slxEB.U!zAB WS:iBD[Z,>xrںJ(&wAۊK[G}ÃO)JN']p**()bz,v{5ɰ=B7ʴT=B%]X̬2Tb/fR(Aaendstream endobj 596 0 obj << /Filter /FlateDecode /Length 3693 >> stream xn#@ >*NCnMk-}-]Y^?s9qb !J5\/{y{ҌOޝz;J\ގ>;߅6&듸G4aA9ǧJId/ or n|*qj|jgSZ ./y ;%4:DoEᚶ(ވt5okRl2V/Ljx9{,Lg3@ًvc,Wƕ W.pgfgoNNSeߞ 8 8L gHُdf 1Z`aJˬaQ^x|X㢆|6zzT7A0U' 6!eCplٮd:? ɊWrouBh110%5=$D<,?-oo#VBE\+e}8ߍa+|P 1xkǨBqpr,>g#+]VmUPnY #',X:zHqq9M%[R"l(ZߧoVͺ@"PԏhZ1nccIוlC>fҡ:&Q#*PY yA %l w "qA_Bғkg2-xf8WI̴Zw&7؆&yݺ=RZ+[%:5H +Řf|ǧIFڹXfJͲ >)jyذ`92[>̧ ܬD3VQ:;6u`ul2Ǐlr }WJG Xٷ28KDhNgD$Maoao+4:nM|\sޕړEff g:NFT  XL.lI Q؀ crl>.8'.|FOl'AF+O\0:Űb~uڪFPhAuKg@ CckY.E Y̲lˋE!L³Owz)l%pV5a >m_t¦dFm*0%J~/I!Ym}~@Hz\tb؀Uunt |?X_ d MU WVu\2!g!QՎčZEt w' !q_#J}4 E*rtj̐>T~>tɌ؁M¶"mwr* z=7~-k|V ճE m۬vt!fD!QD \*RM3-/!`{?tQɗNMʖYSfH_tNAnQo4%L)MXӱ:fF9mS~c2;fUۉALňtH֓"=$D5Dh|O}=&qABST y񦏎m菸`m\7B!v0Uw IH!#wxͽס\a+L0 Ů⑒,WHd6l89X,\ܗܙM{Uv@8`l'! 9U`I(ا jV1%pFpPJi>FoZ/ +Ka eCmSV߷'7K.(bYmlu2 SEnbu dG/:7JgeZb+0`!|JfEA1+# hdV)+zjT6޴e|*,Vy~AaVW M:ȋ C eZkjP砹RI.zZ >Yf3e ]řE SuS#bm͎ f#/Q͂!P*g }ʌR-6zkH+Mw (s3("0a"f~?D!sJwj]p r9x:N觃F{W:;hdƖ5\+إ,$rBչՙdu6k(8i2ePNIp4Qi_,zAA%?wP[JцɻX,bDžXzjL$)0 QCIe! 
G^(,̌H~2RI(*[%qGq\e2İQJ9A»svₘ;c"v!_u-uzlM^f>ve;T5K]n-b}J[lh[S5 셀#^E%OyG}ljTDIX)c:G|l۬pYŧy>v32Ghc,ݻIdy=Ս$'v\Fheki0)O*M S[?Ֆ:L^t0ύ_b6gl.p(ŭ̑CqD, (6qGm >Ԣɹ@/Cw X*@݅:I.`N_l`ZIbA␦Sa-?v^g 4\b17>vu菫 Z;+YMoY=CaM:& 2Ap$8whdlȇ+OI2M(n'ōKMM{'>.O1]웠\8a*kePRΝ%6f܁4DF/DKG=~S~1@cLj=:.HԸu;c!ԝ@w֛@^C Uc81َ`m8+JXyFjeV_LegQj{Ux;?Tavo:Ovc 7VVn;{;8ժ {-;JVRxQ\WLbe_.{{|]7- mc;gA:jǾ^ L%iQ=?lx'0 endstream endobj 597 0 obj << /Filter /FlateDecode /Length 2521 >> stream xYr}7Ob3Jm줊1g(#Q:_)\yyz7gfԶnj˷THhLn|u;ss ̕TrͮﲯŒsȻE+rX2 ],\jcfƐj]{miIVYl."ȁUpL& }X}1xR:_raWoKzHewݿn{f p0d}M.ǦyFu7Cb_VbRIa|le.,zx⛲W?O}dSv30g*ZE_bshNeg455/̕׋}HƎ`E#?a %7B*eN6ծ)w1f"^u (RIvܸdj]>(49M~ufs!bajPuU @rsp7`r ϣ݇d/Hոg UL#:M5ŝ!V.Gg<"`F$S΁L#K!sw`ESyrm&@ f鐺.Ѣ\XF?BkrAFIE z5ф-@F公4/B]&.LrgL;YFGҼSnڕ Bu~Yד{YM 1$ḓOa 8F?.X u4 ys3&};?s`E`.ɦzI &˞zc-ufD?<Aȧcp^qF>:c;G7B1z3kNT~j&G80h+4ExCԺhw&BNAz#Xq?\0U \|PB JðmXPOb7JY)A\R#29Y5@e]};L.FI"{܁v#%d.ݠ85ntFqU#fWTKc"gaoCRg'ԂfE?ʒEO5@ե1tˍM6 |2ƊTS (ܫ5p'۰"Lkg*]-M*/7 /ݰTY]rV,^ 9K `?b gŐa,pa3Qv;ےؑ@0ŒlmkF@?EWJѶ>bg"(G EzF4AɸFK/|\[K [EQ}1+SJ"9{>b̘ܹݳdxF_Tƕ6'B1 /rzӈRHՀUqbףEKFHD3!`k['$J.{r (y:tI^A(~Bl|Rg.̪21 읟BZ ; Ucr:.M!?v"uh F/?qP8Ӆ{Cտ%J8_gͯ1j!77k\s*FJf9Àx T=[]5xxʕ˸9c @=6`Z՛B/Գ Im rz XNWy&/Bs_1pwُ3Y@0)q c ?9 8 ^pR =PkJrTF ]QG? IQ5d$ڟ)'#+'[i<&n/ol%TA(涮RʾM#!D(~oj󑑻 ҕ8EyA|M99-֡+crr7pxVi 2ݧR2(^!ri촞CeF[µ 3 =T1=H=y{4cQcW⺯bm=hkbKUlo{'-j7QOysCs3"}uɺ%-쵴qz~!t#OXEJ$,mv)4E '3Vm}UR<ْnjSMSmxnr|jCZ7>sR$q,H+ip]1Fr%!@dwOVT c/9ǜRy-^j-B_Abmݳo˧`">~AtG+/'߆~7(q/$ٿendstream endobj 598 0 obj << /Filter /FlateDecode /Length 2371 >> stream xYo#ߐ/G{K A +=%ZVJs.)%Iq83f44?UO NU=b~Lntq7q{贠S-uf. alԂ'0z$u~1UhC~댩oY.!ef?Z92x1^RcyaI傉\B,YG;nedA Uv0Ü1'tH`$cLMioLr (iN9Y3ȟfs$%4!2$CS|7Əms;C(Ϥ5,ȑϮ/oNWO7u\\z9ش-]uV ⤾H4Wɾ7#y,dPַM#H{Pn` 몎Iˠ"nf"5to'VW?\ Ki :Fx+'&Q1 v+O>Mw&:VdK3@ DuyTÐ C> e<{F6KzaP=bZ}<8ԣ˘#`O3l,9gر0T߬Jt\iX)G))#ٸSsK=~3H.!0N(O&9uS¶aJ1g)ݼC! u%a{jrL {дJ[{k Px˅ LYЉ.YD AU*!ՄJX]'t8UB. 
y\˃LZwܴne uIΣEw+K3a;8$;}HWd#Ŭ@Pt q m4BPRs>KBS1f]2&Bah"\]X2jE;8ԇ(G8f=!m [z xLKfT6$ Ñ4Sn@{ YxuP0ֹ'%.ը"`yI!h"eZgAαT\=10-HcKH;v؆-*>.8_c\/XQ%bmqcn"54rf')ӨƉQ1@iA!ʍr1+BmLO$!mc P:5T>#> stream xXKF7 啸'Ivc10%q,&5!);Ù$VUuUuW_yy,:zun۝߭g@ "-|};3:ty&r>(-x$ B1XgT,hYAӧSId8-a&M R֍YK#}O]kL xX-VJ9ZI*4ndNs zx;y)C:f)Tq3M&^2+qJ+.\)weNe_J2w/V4c9r);h({| v9ϧsW݊ Oy"xa~qM?:1ñ캪3(QZE[Ƈjwi۪qElFo(NvC-~Z< |XLnLpҜ{Sˮ!zn>zoXՕϾ1} mHBJ/h"x=D"IS}~EJ)%(vFJrH(ňA'.jhMg%'eʔ*ȼG_g? y`Iu~ȡ]fBq߷3/?5fY=l3"xhI>f".;6nr}ҟ}CMhR]~^4d3I3>3؁]>*\z#3UEk]â p<3e[^P,s#\`_gT2BS8UnZĕ40K'IdؓfxA{ جp(*STIh<CqMhS*w}h!*C(yƩsOJ `S9Q0&: r#-(([2>ؽU1"甊b4F8Cl(krnχp$p=WQ-= ll)`ܰeAR8!lMb-dF <]d8ËsK#VQlF6Ǻwc*(8f ҋ~HOO l-}ncfw8\S4A&diQVT[G]n^TlR>;sQ[^MA#܌@o] fe=zYYE?d;nS|6+(q$=(Ǩbs(!ծxT]zhLҔ|]4fP܆L`|%7)<^ƣb@ZwQxW45tNDա\Re+MsB LR\P`~_E4@G p!s?Fpf'Xb/rfC1%iILBEii&;#FvocGΚrmA筑SA^*4ކ7Q&9h8Nǧ&F+Zi(] d`ށ y1+14„A1N>;T֪ 5! `U89FBw0_c)N> stream xYےܶ}oԾ2ĕ+Il)'VʖJwf+k;?n\HYiDF/1yF9|}Qu[,gyCO /37Ω`ͼEf/ee0vg _\3Ԋl\j2B3cHGʮyuiIY."/;z9xEUq4hN K,L^8\cƸf+ӐoL 㦬SSy:,W6eߴ/ܮn^1&9j6 0SYH:uPW 6<ir> ,{=~XPn{[mZU&m'o'kO~0E9<8}1L x`?R[`n癣r>ARD*3 mUdn\5m+pf Ħ?fX,25vؠsf=nL./yаǵFi9a; ;@zĉUi/s.P+SLj[E:;ۙHSy1e_Om5KpQP^ 4o(eR٤H&i3Ja"=Y8q̒6yQ\,#p}> ]uA}`0cTuUycpd%$ ԭ2͉ToNp -s/ Azl%~hJƣ wm|OwZN;ꚛjHO͕_ȋHW-r9 g9 -nO{^ -ntk wЂ*z_V6~/ճyT!<'䫀T0 ji Yx X+WAeY~ABF2*Ȗ L&zPf>ɿTĿ^ޝw?d5:.0^{j^<>I!Hr]wJ0LA6(<'.KG5sMNBQ隼-J^]GGE}; m⍖]W3wVއ/x٩d/m̿=SѺN?'/CmS.Krk~9sTA,Hn1Wx ?v|t¤0xéugqp@m[mGrdrmrk1[n2kǤ!/\;oYa`'*rl.%TX7w%=YlwysAB^e r}]f?z܉ tSlc|/?;cZ8\ :jyS6> stream x]O1 y@BU5C%]2~1!C@Hg|wu&/U$i2XX8Kxr&yU!t/V뎑UR 8()J747I3 6SCendstream endobj 602 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 279 >> stream xcd`ab`dd M34 JM/I,f!Cܬ<<,{7 }_1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C50\``````(a`bdd?3qG߷0~բxy؜u̙[r| 87mBU\XBy8L;ódKމ}}}&20fTendstream endobj 603 0 obj << /Filter /FlateDecode /Length 191 >> stream x]PA  ~ Zib؋6M.@}azIf~DZhaendstream endobj 604 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 973 >> stream xmoLuڲfejƼfzw/6L`{e1φ[VXB[hKK=Z0 
-cݹ1̀,f|cT4ka^=zjmtjZsu۬qiaN9Zhe^v:K8KsDS3~3͇k?!a؎a걽.fϺۺg;M[_pɑt$v7Mp~4 s0Ia/blr.*Rx`R`!yZ%˅%f\ g XgYgs :.aq ,B)O$Vq84 aQr˹}Hr_r_>2T9O RZ.{lmۀN5z"S!eSBfkΪ@T$>⩥Co$8>FF0B m_u?\C<툘zAV~nz[\Jt治sbX:{U4I;RVP"%II۸DItBl.NNy!;- kށb ?GƒK)B򯒌+zwt㑡(vT9&^/-҅l>rcut< :a-I˹U|sTy{ CrB" #j% n"U1Pګc)olzV :&[Q?!oo& 1@FGF=7E{a^./-T`^9B}]OQ{ -NRKp|v&M Fq)72ҕendstream endobj 605 0 obj << /Filter /FlateDecode /Length 2196 >> stream xY[o~o64/ARmE6>H"eqm++)=s۝Y-eJvТ-ݹo΍)qr;! or;b7hx-tx7 kЩ[.⽛,x1HL`=.Vsf mQӍfLa C8h=#H)wş92?Gq ˜tM8)`" ͅp*Vԇ% 5ʠUIx LSA7Jƴ,˂CGo#5>L83k~S5j fvHBf{\ky`sCrk$ц(s8rno$CuELRʇO.S 8:rKBo}в+Zj T nfS$ںEbI5Rv_mT`XuXErR1!\U=-l bS䲋+*:ph|Hw`Hȗ'n+"С#Gq*J)ح+z~RUU;:ؠCzSNPcDn?2) ~?pνZDJX)}pLLtPceEir>-j._-;5c62 k~!:J.`: CqiZ5 )L41XHWwu+y0W6'dbJCa< IL/„'@1fúy}4~_Y/Trd|_ 'fq͘%YVP@ T\D,p5b~"T<_3,DzHd(yu>`),fƅaz_XRH85[`xݯ3dRujC 8QtFhZ!P5"%2W9͔bs8fm[X@TNe֩X.|*ow?H!>ϟ~̡GF9 frcIh p݃=rܮ` ;!0A26ӳa6"aC 0eZq=]ex}έ` nuw^[=Ԙ_ƕ$/:Mbb2NGr){T T'|Fa/v1M-(7x3n[As3];r&ċ2 mq/5^ *>V|@4Sڝ.&X;VC3sHހF0?۸_=&lnZ]$Zis-$%NӞ=6F<"4arS#Wpo'4Pu61l7Nz9s[o\B3ls4*ģF?֠ۛ(0^Աߜ#z('m'!m㩤2پ˭'2q;vDV)KRIpp+,?./Td@7OQB\0=X[7O%\[sN/8zޣj4zX^ r`s\\w@.qUj{/u?,ThED>=1=,[X ѷ~t!<)U10ѧs* = 9 _ \ɿ=endstream endobj 606 0 obj << /Filter /FlateDecode /Length 1402 >> stream xWmo6߰B>Q]d Xۇu!/Z[Zd%[QWHi;[7xx>#f߫duZg †-fT0,JYbeϮK 6r%%*j6ya͌A*>vD K&D&:u&܎_QK-$Uz[%Cw(jDa1:ҮQ=!oF}vڄ2N8/ Ʀ}!/޸G1NTkڅYA90{xM./(ĆFP,8-ųkڵkPL|̩q@d)N`Kh@]Ap0QYpBH]j꾮vP䴯pD$zTC1QQR=}y mM&F|ۇTSyᙒȢ͙E~oA JQ$''T]=xI\7CҠ_!&}6 fp0'7]6i&!茏)Wuhu֥@]tI 0QcR@)>#z{FMx V}&A۵%&%J}3JOʘmcWjw,ͪf;yݸ>ڝ72$4l.؝gQ4qڅzi?G8NR{n5ۯ%"]·ljĜ>9n?tp.J v_MTc\27Wy2(BJC3b;O,[bDJ"Gd"52QR&SIX2]HcDB$@>B]Ēi$JL*Lu)#(YA vZ r028" e$O؉^T 7dZa€(=ZZ]yGY 30-dnHۡw%_/III4c9Wg[:_a+}սm];)T1@nB7OO]& ǻDI[;&V[s"V v֎ήԦt9 aLC!}wz|?~*j37sZcsHϳS0q ;8.]L$s1tυ%8Cۥc?x3a_rT\ а=),3wn\J*I %?nxny[j=qy{o xrA7 |qqaXPXqz{药/`endstream endobj 607 0 obj << /Filter /FlateDecode /Length 2441 >> stream xYKs/$O`jOT&䰕LegU=Z-P8>xPEٞdS>/, n͟f̨:wVYRi:_>ά t<\ΗYG gH,׳;+o "פR deq6LfUs~#uOa7~}(k|$ Ie& !de2dA Un9wwfoNMKRn'*W{;G'X¨h/U 
>Y"8SdKҷMU{3P"/(DTR>9,5B7m-A)wecRZU8u_Q;!"BBb`+)Nrx}g4gJb̀ɂfBPQҞ28?ݦ9>e>f6va}#6UYi9yxT5OO7sC~pe+9e:z !3D`vP ~C@p%C!v3Y5yKS KvRpg6ĮR2n Rʊh1-dl#+_T4\"q $v]ɏKfuŌD6r }r _o&4b9IjI0EE.y %@2 :'+@|]8F2/Ux`*DP7[6$ Li.'>h(n{7w7aY x:NBy4LZ,~"76n>u5pʬ7°~#xȤ}_HpN ޣ -RN1 M:_ACs8ca62{DZT)q@"As% E ҦThibtuUM?i Ns@F&Sta۞3 ?;iÔb,Uܼń& !TH;m $o!zB.`YPbf<_ԕk*Lu98wMCZܓW9]qGJ<&HE-PLր}):e8aplnM)(dS-lSZ%#ҮrY "OY.b) Ⴓ?`C3SNa<~ZzyX=WB[azsQPo' el? 1KNBok.T!(56BKhXO3SsnSN`e8]|L>J͓ >m_v|dqtpc6υ7)Sb2=q6BdPAɋL*dFEcɵ,5h 2mk'v9jH}sF?Y,N[7 !g߆U86nS xc4MW"0h^Ÿɟ͗iR)jHgHG>L#7s"tnK30n[ط/nX/İ5P8Q!-? Sn (̃Ma;JYaP 7]e㱉녡S&b~o x }JZݱV%Fŧ<6s)AΘ<{'w@!,UWnjAmBdf}߸aRb|(Ra16vJB76RruL'/öpF*Ե))#&[_PzmP} Qĩom`˿r-c4bt$4݃ƾJO i ]^Hq=endstream endobj 608 0 obj << /Filter /FlateDecode /Length 2674 >> stream xYێ}W̛8$'1b'l-`v(.r&WS=$; 0 tO]z`&;~7n?/P /ֆTM՛7EST}v/r彛ju㫥Lٚ:>.WK՛Xk9uۜY?7Y%Wӹ4Q&cō i}Y(q}]po1x)g)\鵈/3c?yf?dM>u a^.~\jL͑!Lq&[S~ś2G?.̔XT =0ŕ i'Ŧ)WPPePL@h#.GB@p$?YT (M% %F JrdLP‚yUR Pi+ë z:2k'X ;Cyʝ)B!i}f\/4YnLDPlI"g52arJ8eW)L&G q ׈vFlE}9Y`㪪 %4q \^€Z[ % W0x)T8\l"TFA < RlQ)$5 ;*蔂t&bNP{(BJȪ~qxE7y ԫV7Z>g=\4Gn}zW;"rXhYA@!)L ;RLK\j^BSaPGCkk%3ZAX\,tJ.|J -ؙ(^CDCx셧 G1UOJf-K9&dјDM'SaӔC/0ySjG0ʶ3Ud J9ګ-I٢P49pZB l, ^( XvĎSPr3v%IL* sbhbU jCFHOleQ$I0g(Gh\QїX#F]%u=Iad-V"0#J9Ya(K^F/$adDڂM xgѨ$WB*"aȒہ\)P8BF;y Z,f~RmP"+ (;ʨBrFMzzIs\uVchEyƧ)Q uLx JnT0͚dgWX*yB$!Dy3qn3nJr12y xEZRoѬ/ '/0gy˒7^PD[e:T;t5g_>5 qFh ]5rBMF(ɰzs44pFm.wW&sY(D;IoTД']¥Fz`#[-FɼbhXtAvb2 7~?sϏ*g; Ȭ=SC`> .]+j5u 1*af=@GПD(,\('݊~bIqV^@VhovPArjz]mht||>uТ 7'0,CsXaWa}0799 +(ٞ+ey{ضO7)*~۰wK#{V%խHng?1ǟ ogL/XhS[q>?1vZAL?wO4UQ~G }]nendstream endobj 609 0 obj << /Filter /FlateDecode /Length 48127 >> stream xI4[%4_q.o"!$$)1@ JEQ4h}lf(@T*ٱf[J_o?~_ײ[ ?SNs>鏿9c/ОO}h>̾kWߙs7WUF?GcR_OO_yY|>ޟ_an_\0~K0qu{>9{w-W:?WxGx{3-jkz~\ݿo~׿~52q~ܟ=k% Y;3J=iF*;v t 9;.qIH2[l  1}k 0F,91Wtc|Ϧ{!2q>neHD=cjF0۞97@S3?/T'N"3'hY'9n"Û9;lD83x@6_L NU5"%DA0i!sA43㋇ĥ!n 0;Dv"]yXk[<{΃U}iMT0.p'+3#_4Niogy,d )/d%RMd`[Hs& ٚy?!}nDh{֌D L}t}\iz;6) 
G7RhsݿAzFmOfdȊLRs?g8_c}߃5]ʨ_AްZY?&Nϕs籓<g"9XWz&;k.?H!Wr<ߝ_A$re5"k/&Q-Yw7—v}FqiG5Yv-lakӁRJ }Fx#W 棽{w:b5<%W/ZU.y|1q|$#F&|FD!EU#8bXno_Xqus݃WZ\JLSSfȲAnX ]H5܊bO(vel#0ABie8D~8|Ip 49~`D8U~C&MEԾ6v@0n&7ܰVXp=l.#V< ym!#5b3E52  6A!50l244!4nd( CI[y_ Tfy޴YBj"xw] t!Zy{s?!B{N;!]>f^Kr " H̔b;%V#0Ş3k9}|/c(LŁOo{%[R E'RS #jHkCXٗtso\l}1 ^ś˜s&+7H^ -) l3^yOsH=Dj؎k"4 \rAlD>5s >w 0Rkd6@8SǯL̯ ~ Wmd3(Fy2PD0Ff&ÊX+Lg.&ױشKE-(9nfJk '#Ӈ6DuX(DD!X2HZ}Չtd .Y{g:ljf*3V1@spၒ:>4o{z֬kgd^ifD 9ly r>m'|,9"x 8p|d<:zǃ0Kzx‘4!2gxZ7,9i&1ȳfbr3&Ӄ8_",{lS~U:DV")"<ƁDyfffX0q&p&Bp{=t1!cx5o|?a^p."o%DOy" 9VWwd2.{ Qb\!cy7܋H #tVҏ:f  ?7o1.=x2]5񙝌 WYWXn~+WAImy YVUdLٙ =#奨fpSjۏ5k1K8)2ΦB#w T^p~#i `;-zL+Fde$# s @HƳ{5҅$aׁ` 6"{yx9~E6tx])w1R"[q֌rM6ɖ5!3bx%lUt_?oDD`}ݝVLǘ)ʼnd1'0(R\8Wi۲`Oǿ{/myu>FMc]1n03|z!3&fQY`5ܹ -政МEc]]QIZ1_}>vXZȊ3^9@:]r4m .ڛqSG0e{?2k*3"kxNu.ƒ^uYi +꾰Ak:BPQ96xIo%[x@=~ݖ27׈p.55G!B>VȠ o"9ߢ3*D8Qz+zyFI^zQx]MNrr!^-b.zw4ߌj[[^oёٖ00Ҿhc[47"h.Ax&&FBw3U?Ҫp҂҉$ʿ^ÇDx@Yp Uz vՑlBQμ: u0W;HeK_̍|KKuKOhݶD©+ Qh_d|p!KərU_dxzMH)cQ; ?QJQ1Qo݃|i 'Nxӫݹ4pڕ&] Et!yf^˦ݪl~$k!ڠaC\X-|0ʟFP(Wy紓G>{WCJR8c$ҔL^@ m?)瞧Fi{{ jA# (N~j+pcߵ>_8jkt=>D..ﷰ}ݬ!maqL M~%pl[$VC[rBeq  >šd AʲbZeXa2cz&SQPb ?LhXm5ִ ] !d Vj`1&g[EI%`Q +[t ·OI2S<ϥtR(1^d}0]xԈӦϲ]vzJ0glDv.=HSHF6yM!~{$6EFb]>ҥS6ghyo|(ub6RSљ)x }|D{3NmcgBj{\G" 8p~V,Sq柑z"SBn`7q4K꿊fGgѱTM{0T023oI=4`:rİCڨL7md9&*.Y=Uiu+%:w(U}ә4BjP*N39x6딶T U{VjH  LErqQ%gүnUyLSD| 2@)x0"*wUvU~IӢ̹c+]%5Hc3crua" *nm kh2խ9dFxQb<_(a<HJǰHīבeI5e׻ecfEDt+kiׂo U:U7I{ -3UUq@豵x f[E0n%k0Bb񧫖Vge}um * >"gS'#{GMdc[o[,V eZʳ$șAdۺ:W6ڶ-۞ Gm%l)"QԎ_ڼ4ڹZ0ҮTU6XN8XM^LaӞ%mW3#3Dxy]`#&y$E} Gw`IxQ8ӋyIEGQxvUY!#2<;rv{-ƴPG5ta^}UYe c RHuڗWӬ(%AOo#/Tձ<.]pf8:WU$# 4rgJ9pCtb̀]y(00ulQ9µVVr5CO-jEOYI~f.%4qc2Q=zsrР%,L'9/30rǶ01 W5 !Li|(90U0 Al8xQ;.LKZċ;>[/;>uyWF>rU9KK2c.*y)R,~VJ}aŠ€:a}y?RCAb3vt-xf)b$lC# pA$>e.Qg?"[v;~QklY%]3EgzE{8t R{y|,au-6a D%c5,xPr%Byok$y}0yS{k۬;6'`Zq[Ny~E8Һen<66!x)t")NY^3n'1m?y0Eb3S&2hkaG5 ΕaĚY &΃5al\cnrk^.k|;PZkE"<5]Z*'4\vke0 Uh{5[A82?U%v*!R rw 3bU|,~AA6*`qn cbZA S{C܇9>okm~B*^Ux _SM&xv ȧ-kpI VDH,] ]O ;U2 6 eGI42Q3v1wǟj̑H{>abY qwr u\O6[)ɨlB'%D>KEy 4""Y5f*P *"Utݧ9)( 
#kʁuG4{U$@%S =D(SɬIreqp1 j(4P0KhE$uDl&KhDdy!ňҍN%Ekh2}#@\{goC*W z)XtP,Wϩt=h)F0s8 kʜ63R0*^>T3MgO`"rbө0「O-9-\(eRXcCS둟CϠtk PM/H2@bE:/L@-8ā5J?:\hysF@X- K~?RAP/1=(BF"?HͼKå#A9'KU"]B1A2"7$)RBޕلr.d$CU#a Z^י*kۛr9ޜ/+a%En6b"OZv3'F[5*SwE(#ḧ́X`(y"xMȐ㵴Dsa ahGm Nm%;u K8Z#0nD7bdmu Y@cN).b6s~ Qu "Gx) 5RP43zɑ$ aѧHPnF|þT.&q dS+ZU cR UlQjۚXKWhk0@BE@ 'Ĉړ_W8k7/Uy;D8$B.Zx]xv b]a2-\!NZ85_Ux'ȐV3!҇8e!Soi[G JE<ǡZDMńE@H N.rM.zw)WT#Uhbx$/l$5ʥ>P $p:Cx"ehO1(Z& ^1 x\rgz{LhK긆v-YM͝9BTQTeעlsKRvr/b֭S.vJMeE: @>o/R owhum$qKH5ENU[S J1+1_c*O)g=JQb4!Hl~!UTRa\ڌe j_dQ~ҲXiE6%?} ?z/PC_;&>#l僇F_`m!(_B6}#::3+2̓02s9(X)O 4-t?fzo]Wbbu6TC@iOQעULi[ rw z;K"Ayǎқ5*STܼz${zo2q7~s3E"id:k~lʑU)mUУwE-C,WᕮޭsRGU3Y7U]CYuf.~a7}Buy)Ǫy^%Vޑ 4kjP]P:Yևy钙T3Q(Z[^3\mC[ clDqWͺ.sH?#_]lAby93FA?_[=DkV1hCvsZi34gAU|"KVPwzL*zFr|]#Ij ((rj.t fhGo'od^.Ob&[V DYzQ [ Ur_g)^Xp)7CN/+EeJ3uP,Ԭ[^H Rif-i⁡@Ҧ3T Ԓ>Bl.35EhGc?t1]Z*kN; Cլ-qq?@vX!w(JaqEP8̞Q!D ݩ25څ (ޥ>bͣ$!ۛ8VŒPݡbu'ew I)\hi} Ĝ& H礂;Lm2Y,/m/!'%wjJu&$.`' 򷸒_tp2llQ O}ysa+w[JU5->ES73t^|3b3Uptb@/>t-S^f ᲾMhE5o?|o Nw6T~MҢ13[T=f[$]t_dܭNRP6/DE{̘+P̜yʶ`3r70tWHyY"=Ao"F7Vv,6PV=XǞA,FǷf2hELEb:A,h,gll J!+ lo0ޮT*"Ƴ* U9]e wmqj za?5E]e3qWY hv⮲-L+&뮲@.U(* jm⮲@WWY>d,0{ԇv`lEU ,wm7C.Z]eT*UM*ϮHf\!a.RsWY Źn3@zp\1oU;j-琮(mvJ~ ve2-UfM* }fQ ,HyʢHf$vb YFa漣VCm |,.f҅]3]|ʶH,7U&l" eUPg[̥e[Y n"9pO,wװ)"krGAAjv,8E-3q_YT}ş3G1)"d_T RV Gm+ۂ_T2xJSA:V;vfCbme!-T_?o[Lf[Y l-l)ne0zṼ$:+"r.>@?ogK ]kj !HPe`Sʷ'ޏk_._|L O1{9Gyh'zypok϶Sx*CQќ8W "'rUH_%/CԵ]4}jAR2Ve4뼫dMme07ITw}٤, JMYH3HKVw{76ui7tsddsJo#/ Z#oYݒg'RUp5UckEA/ `ey{6pR[{˂ҽe-|@S@*s2X)a- &{&0{ˆBkNr##K.J[8ੵl2#ZBeֲT?okY xn- ;[˂y3Kv5cUCֲ;,R?okY 9ū޲@Z|իP]FTDD[\,2lG۹˒-+D|(et!4Rn\\Z-@Cly,Ҥ T!(,l5 Mj{ܤc?2]~B!q9iBJL7Ml. HNcKqs$RgsY"'~_H;Sh|"}#{ܫ>WӈUm$[l.r\6'3ws|n.oe_&˾Mn.ˉ'Ma$dWHƣe_|gsY.76a],,kV.Q\n'żIdb?`s{)n. 
c%_l{\r^?ooY]j-3jeiKw,˒kV1O,ΏcZ5[Xet!޵UWg%8 EqWejB@ s>trܷ$w#b,, kqjSG+Ld|ihr!4ӀΐLVnV4eaן 1|J2qZ',Mu{Ye#^ an/ _ѫlh,T\[eᘩ۬dt;hQsO(:˲imRlOx'tk 4 , ̠Jov$;\֤8uDtsSJg-p(6OT,s IiudldN@G1^R2BpH22fuvcD]k"Cy3T3Pi3]"L9P=cm.Q9x}t,XŒIa@:GFrƇ22)xe'!:Eh2q!V˨[a"A mS{6IՖyM ̈?7vwh7@:_ Z ͫ;jlO3gv`lYoo^"C;wչ7O eY E/qQ-@rٺP Gˉ21Zn\5Z2X iH#3TK&BwM%ʩBTdκ,RBC[򼻪sSsHoWm͇_# Y!.ތٺ ff;2?7b\^bmmJ~sY$x̏RB~hzmo53g<[ <ө:[RSOiӲƓeG5wyJ"=$^z}*PV{WƳνJэfl)ov"xҳx7!#vnjd_[e-vOtͰUwU>W!':m﮲3o[٤POePv;Es߮Zxbߎ*uش >'ŴNncVxU>qo~Kd 1[@tY )^Pb5Mϭf}B5 E0~>Z$φ \:n5 ]B1BLf\T!nٵ#"\![@aRzPUE,",j7f@lI WC\9:E¶,;n$kՂ@a$M]lUdMDSb$S $Tf@,xt8l^68;X}Bnء{6؁coсmwd qurpY3,h-awuKX])$I:סl2B{3؂nVmס'9ĉffӚf?JYOWsl[__nYF, IF"x>03;U^y"M ?^wwvSIh@Mf))woFq[-_iwyr>׸=Ǫ~f^`]7} fegٷQN?9uYde\Vq1p̕%k` @Ϩ1$Feȧ[ˈR(\∁MYf!dݷD*G+""+bʫT2uPӕ"Ya4{-X !*7:VI;Uo'Bg.8zeTzS 10_55[CTR㮫LoU gS5?BEڑ]'^wXlPؓytZ&15í-SfPb]`l#EsvRlLW֔dN&TvbU ױ&O*?r 8<_!9a tvmZx = )õ=w'Uw3~G| b7|(eގɢV bNEn ]Bbר=K A8[CI'ck;j=L,DNWSwEP~[[:$9%q7=IQg%=C%K(/Ǹ~5M8Ư[3e-1#ߧъ%v>9O7̩ogK7%7 jq;k+HjV #!4!h*5ZUKip/f.HV Þ)K6Hv$73,奰O¦w*ڀSi Gڡ")Ww:7e>1]!7mSC۳s{ɟH={'!$8K?&oOEe4u s*aNpkeh@A2( 5W{]%f'NTt 75{!?nx.зHŹlG Uh{*ER`Z޸`"SY5" 230B|GU$Bܨ(7:6i7AndXni(ӂ.ӻ@4%r#F %`̐SdX eǓq"9ıKJBu=w=\fawjN wFmnafǐ25N€[VvOkNNG0u Cp=Cޞ ,EO@MWWveX} ;Cy{U핥!oOҁ :$>vħF>N!!p(?, Z1uB{R*[|qZ@M|τ~n0۬OKX+c"=`>fcj+;#i_>6|T2vX?@?Aq@fQ :CޟbuTuv_S"0(h)pT䙆%!`@[x U%uŊugžțEv 1hh>(nO1͡^*|cCfFs@e!n1p" n/N!!n~u#B$(n U$Խ;X#<} }YV'ng#-wCgn -:mytkF>wRL#BG7|H u{!ɆT5Ƈ=ћÀ䇪 L7fQ^Ɍ :#BhOT0x9Q*5.Q6JJc=yޅ9([lPig\ CڳT'c*za1R2%Wu]n](J@hc%=WtHL6X j4|yVΖdl8d\Q_\U<>Di*\խQ> -/*Y4Zj6Npi57"6H!;ZFh;Z^S+UF[_Ud4}ЫNw|I T:D@;ІF>6u&?7*{:; "f#Tejt]l!ƺ8}#(~ΐX^}-d 4`r GCI5#-5tFb{!OrQO.5hTD[5,7DZ \~{m%ٸRM/ n@%k1n{;+`"6(U-4a ن/GD^%9ᤨ. 
pdh|N)h7ʫuKͅ6|Xmc;Y:jbj`Xwv* [2ĺ\\ly7H.F+hD BĽDBN_!2VM^6 ._s(9lɌavVԔ9hCqnłjpCf#NŠ65JiA@fU #N/(@폎[xG7eէ~Jv )aoofU`;L8w0Pwgqu!Cq @e2SfF?3;T7jGU{J-uAKDEu]\Ui 9fJm7U8S-N@UB.?B(+2̢4I- 1802o7 Pt[޽N2Qm9ԕ\iC9c~vn{a=;St֧-S^Ftg!l@$ر[g=kwk̴n >a`&l,(Nj/.n?~ =U!)4d^(]^7jq#2ƥ\WەTZɘhPheEЉuvB$0)`qT{\7 $1L#p$n-Qtј?\pWCҝ|HΩ4GIR[ݓ,bM&ggv~&SB u$1U]쪡=ZR"'0XڦY;\uMƒРOFTz:;\jMD ,BMexZ,e!Wteg[-wjk,OXH~mVđ!geR<j-I4 @1i)4sTP6ץkaf0DhUĉ3?<D SHd+(> 6fv80L/ԫ״՞rda ӽ`l["!mġ0Gj|0=ˍ͒U?\KX|S 7\VR;MjoĄSGԬF9k:T9ł%밣qX*=܆|Cg, 'BzqE83P:GNΑ%U%.gH:K$/Շ{FRw_< ?ME.qFʥHuCw<^G.G:}U#`{ jFnI$wЛ؂]^J\2ȤdӕXS6!E}[X :"ƱDCemO&k ZyG[C۱dq {cm]ĭ:z"ߩɸO1T8ЖkDhW/yɏynAfςD6.X<cR}AK9hmAd69 \sͅ)ޙЈWU}ZuYdfœmi\L-胜0($& T.Z)wmD7w$Ej(A+)\Oj )RNaX)`fX=JCmCM \ۃ Rz`(}!(v5)F`[~?ɐd[E/Wbl%1+ToYxS$!t(Y޹W+PjnS6ǕY x 't)i UÒ-W79g#ӑ-&=@9:x.]Z+fC$tDV1,\O)w^ȿ>E+CvP?ڌq"֛5-_~ѱfaN=}'[G#L$u+]RKEWah^ V!ϋv=jǐ#.4S3a쓒>fMbϓ.H+jYqʞy)npr/@釖jj=_mĊ'A UnC?եr㮚Wm.k *qnVϫ*5#Ѯ1~ CǗ,-&ac9gEo^O@W 1#4 .KNU0 ϲPఏJD @JO5C?8y$˶*a;y{*LMdB ?ͅryph@~ ~ %M d%pSe !7O x|ZD8'2e1C-XQ^u'ى, N7H5.kep?fHyˌ>YU@|]_vQ@2 _9Jy4 Jk(L5R\k:N `8G1w'gqF \*!K.#T9z5L8 .6Vʁt!H9RK`iB*@( d& *72'` W҄wvD"Z5Eni#mLiUh&?:\`d8a - ZPݮfS(RB 8=ct+,Y;K8[z.1c0(e'VH"zC. fs1Cs/ _>k ]wcY FVͪHº(e{ /W r+/։hɸz~vr074ƈD!  NF4%6R@Lꊺyhl6hlk{il=QUf))|,ԅUx,V!MIR]ꆲH!D%E e/WAH2]pŠk *ZP]A-[8hN8#d\ u?R $eMpNM*C+]}L-D4M4OI.v|Y/B) f0hHFpZWekQ͟zh??@7-yˬ,|,|"wH>ƫ341|NͷC>ix}i;} 5{׊YHL-WYj"趿VVF]35EXmp]7 f]mi'=}6~MLMfAwmlEk:Ik~sW0T)q:J9Fth"$2:\;:TïuJJ} zwKʸYStLę3ǂF5+ E+a9MfYD*ff&%-.H"nܥSEQ$A"N.q,r :Gr4@j"]hd ]9F joՏTR$z^[i1eE"Tn$N qJ=zIo"c` $ *Au&zA=xR8>UᆧL>K:!0z Mh*DHb#[RNӭ&  ]O ߜvR&27ǒT^,qh+>Yƒ@X&0ѼU QD|+Т$lQv"]/x'[e/NֺH(ߪ,Dp"xJp6f0kRVKb?E$԰'b~/_]䮕k?vܣ>Ҿ~*E^dԋ|I)c'/݌cNI2` Uvq/U5 }߯7ч *Y~MN`Zˏ/1&~ez%e@ⷧ*K@P!fjN [s4%Ouk[nQԻ97Ebun T1"R;huE(#[T$L=m-$a#)s@}ITZ9).W{RsNj\!=x}?NI@{}r@IZya|+k,ETHXehLh8/{{F)û yxCSg @6;6 vI{E7Rm37;9R}J!քX|`\Rd@Tчf?cres HM@KTiiÊb@WM? 
VHI [ϔnvy-6UڬhܹU[#>T hҶJ5c:ً*2ѫFX̏_h*Cucar ⯁8XF~N!@F&?ɍumwYҋ b蚦W"]S @J 7m?#~#\DnF6E>jx}ގi?WCU1<齏k`{_ojvi/fLaxBfPr7WO:Ͱ4G6@̭ yrA/Y2LDŊWQYffr5UeQT@FCkXÒm*mU1S)D%[udK2i[ɐJu]ʔ!cK"Gn~ȁ,ʆpx)TYeōiBi®cxƅ(D>fl"5mtL#rXӳfpqQ@4l'C:P-0uLbԧ9t4JC`3LzGҴL5a8S5+3CUȠEdgpb%ȣO30K?1p:|OU2]믔;2@9BtV^-y6㲬C\M8;j aC@ /`=|%-%wD =ĕ 3bl\vh Ae\ +xxߥG?޷SrWvZdL}JjC%MϫJg"=EgŲ%ܼa*(aʁ@s`ޚ3@mId<+ozD|+4`'xpHF \7bH_wxsB^>wl pߒoZ{iH1Yc@ 8?hl~MZɂǿv8sc$F5;I"}VeH~(=\JoL~n!|$ M͒_^k5n$H:*?"r EyS &/L2:,ָ!>:VHDub@@)ey `ȯE'r&@J"P-s1F YDg9S`U:; B輾CJ({^[$;s~~Cjtٝ<݃ ju *5`Ux,2Slehk#ɽn}"<>3G |& "^7Sqx6AUs;Py 3T_U XCzP;Ž_j@stj3slK1}h6OA%kh?~vU/uHT:n\A AnQi1 TGN/}S4 L]Ate[+R9cT.lk@GsO4¿3a$LT.4WDbQ]W`e T Ce0!dnD|#p+ WyCv+~isr ]:YB-T2J U/1Eaj o$ -t-3~pzrI %Jk,9^l>xcd\!V&`Q%hLI\`y nS13S١Ҧ-h aW'f(pz*MCX5)00*>UMqKb8`vpaZԥsc:Eu( VY 'N7S:uqMaX|IڠP$T<79M{=;|_}Ch$ܖ⫟5dZGS,^@E6A[LnhuְHT߲-&xṷ2wLu˻.9T 9y qϺEaix`hnSCa7fPJL e^ey_܌nv̐ L)\PvcR?GZKt3eJR{FBUJq#4dүYJBqo[n%Q3ka~5 䰊OrMCc^=v̎ȑYP܉qE8Үw[a:(ʀw3xERQ(=>ұu+OakSYQѩإx2cxqzEq4pZ0;,Rm{xR}W'jqȢ~!xת5_#ԕ~t a[fVK~z~>-qG l* " kPaAPH0bo+lÈ7\UHa&{:Kr(~NB =Ł(i9ǡ=wOeJNۓ 5[6RPkWqniw %D̘=2х1xWSeXⶽw(DZ\1[Djv4P2+}ieAC藭><04xW+˚+nZ|jixيTe3v P+ƻhΛ;\PJ-\s[bhEUYn̷XyW(ҖkKo`"~nRx͢ZMizSxl4/ICuh_* ѤH4*Q qB0 *cۙ )5ދgL 5U(|yVIߕxA%+Szl&~7xR\!w6k: pU]@m X,?KQF #.Vly`ϔÅzױ!?x"OK̾ms_ԭp䍾nƂ=^rh,zp'BWbrR*kLR*bHZsbXW* j`6N2o39&T |WF|5uĀ>2bMWd9]H()K#z:oFaU2c5L̊3~KdLk'  *kF`aovx2W0Q ,*!>Ol)ys ThWSJagXS1<( zTxx51qP4CZ&~h㾖F^>=V<7LC/ 3tpl}*)%B M!C:riEi%҂ cPsN8JlJM.WΕw2Yfnu-)0XT"Vb'}NK[~ zٔJ cVL\gc)TWZLT\̭qbaw-fv 贴 )n_s҂Lϰԑeg29⨉(dJ토/Mޝ:]TtXֿxXq01"qN,attE-Bӕ}tbFhij)EOQA|WD{zׂNSXSĀ6-t yOئoMYe'*as؃OD]@SqnjJ}~eW<%*^US8oՌٷhNN)D89߰Tnb'|B#iUQ|K\KP=1RsO-eA(F1ҍ5(jm3MǸnxʚxkWw1^Pǔ(ߙtV3ghn@%lѢye%FڀT S0`p [4僢]R~rfjŽ;O[CusA[:>tʺXy_(:? 
b =P`Rʠ;܏:N8&ܘ9d7T6s;K*=ݜr O7$f{$2neF6(=I)[7}q;H[2>ܰV*o]Ӵ%dzycg[).9y +A.dǿQdS(U#l2q(+I8[-p:7aKB?nvV߮&Y9[I=vcrI݄!2\@"*  ]Ii* vɁeԙ S>:C9ɣ)ָJ"<,.`y`tG,1VQRo$ϔUgDzoV-,xUe[v,Y<+0Ҟ?mQ}r5EWLf} ?o1ޚ ʂ77rYcU-[uZ|':`okN@uj,nDH8fy8Xw -DD<NA4u3T4khhƉ7ǐ׈mU[# \Aw{ N(Ch!L#ndKpX̉ 2ێ-[Bn<ݢfon[:cV 漑]܃^R;S Zl]b7 ܜq2Z6AM&Mbvvn *#\"3Pڅ𴔓2_Kw7ST]o(bؤ Nc6Evr2~12͕֣R٢-n/@R| v*5kaʎlbʃ+clnUd|p+ac}|yG(Iɏ%]\+F0l kh)(;9jqgNCsW[sjoFܠS](TΗAq`I9y1s. Tf ^";(%"ڴkN4asջ\+׈q̰xE g I/Ϥ&!.(x錧T,6GknF- %%8hm=bsS$ ŤD7.AȤ(P@jI0`:Mf4$ ]MS笌Vug0g*)ZIE|ME&\,W#q9#ǚm |yVc^0hp~4҃!L}:b!삿ԕ;&)TR9m N~%C.VJx s$O,]Eo?֣6͓?8[n6=(%lc'Y7wMу75k*W-bF ,ތ92M-JDJ]o7=^+c?*e?^ <*- 5Nk-}pS@j,[-9b 8EŏP*/iJ8twA{+ݹ >~j T:'(ɔo E Q\3(/f aW`XmĢGv)>Jw)w[1 jQ SLP#(40:& .w\yZՎO,i \|y8 5E)eS <7m&ΐ% qislD%w.\(@=jrw0An2Ax#;PU0(7p3g0P+MF~JG^n^:yl׼FDp.tߊLDV95#Gl,*)Q xO2<(, j6 2sF)EFc#=WmsSܔƱZ)p4fPw| %sWxl}}a+z+t̨Wc_ZƑHM)3&ț *[)w9w3N4q{gG 쌎U2ݯ7y53ݸ:jt瓑:OP]?AL:[d/T= eɐt~.`gz;zDsF3^l^襏ɣR=f!ΌMBIt 3~Zq7lk-2r[/rȉ0w"I|KM>Q[XZ[+%kpcy 5B}iݻsz}O"I>'ʓ}O"“}O"I>'$rD}yO"I>'|O"ʓ}O"I>'$rD.'$rD}PD}O"I>D߿o_%WO[,vu߼d\Zy^=(r?|?kz_6R׃zY?Q7/օ|?JFwgBV2ZP6]Yxx@!iW|I\ ->R#Y IwAb0&_]R3+ H9S_ !͙əDS"ǙڕTuƮh b`<W: ڥz('4;7tC8WW&-]98z!ەN_ ѨU#re4>c~+F=KAʔPx-eQa}B0+@.a 'Hn.et(u)c%*Ֆ!8^[$:?>/%1wbU_829A2lU/k+zQIeЋgzyCgRE+89-, t)8}ǃ.G;Fہ~ҰX< e!ڙx9=)=3}إK)]cȾL]?xåC\<Ɯ-ao Ba\sNt($:dq X%'7rKxT9 -Aȣj(zʼn[h0-Γ/D`V+ʛn D ; ߐY)4=j 6m7RȈH[C5о 4|$lPJ݉!ɨg}kFΎ3SR*O[kTnKipAun{L8/ ɱo=A^hѾљԩs;f;) PuzhSPRǙ@+t9 o}lGõD0se[N0zE-gMzJ֩\I4 TGIϐ c+H&#cnDō)޲cFzLgv<-t$vLC'-gMqrc(TJQ)CGSRTD`?i6!8 U8Ҩi¤K%ҥ^3h,cJ)Th\+0QY8ɐ{!eȱ3x%8eæ'Zђ.5Pϩ;^*BR&stlJѸJ KxP4sBpaw7d;LeGav(Ksڪ,GEi $wYf<G< jӊN|u" AG 7 ;`CɕuDahp-oIKj8GEfͦg wSQQ1pe01Cqmcƌyd8yeX9vw,F [C(W:wnmɞYegQrF|nr}/@.GW.̕3[d 'w5ix]0S PE+Lzf]A/, =*w-Z& n-kYy T|QeP{22Z\ؠXR2-$N3J%jS&@H6ci^zm"glu61!5+#IX cf{Ŗ9Y nwgCS1oL c}߲&Pʉ&G }RccC{(9Ǽ ĚGl'[v5Vյ[NbɸU "Ry 6ʌOy\j3ȷfbܟ|tOC _!أWQ.UxT`;yXpB`MUe gŔL*#`o!-2BPIѽW[*Dup^]{La9z8zR;|`^{0Z#b4+&`XC#^P.W*Q.`) eb)Vq.2Y3v4rDrA)<d;h/J 3O#-ԡdSI1?&s36ʼnCa:n<8ǔyXQ|+r(e%"ATxJx.-sx}46Pډ+ 6&! 
kӚ1qe=>~X\*jnnMH$|V킠;wiP$B5Ss,Tq?V72[Bɻ ND&A*vOS lZF7Ct>*,[ S.^ĂAK9;FLzK-yUc-ⱕ5"' X 5js ="ѝ h;"у"э+hpF$%gHtq0^Z.*O>(AySj(CXљRk2<;tt2`,u|7b4 ] M}`;ʡm2\ Fb+۹]v,Rd{r}w~vgV9(>!13[ŶTGmCv0bgB Z61A&@v oktue]R*=ELѵ׋؍@D0IID4ZÔޖF c`VbnsgbkZn-ړI80N(n *b2fPBX|[~(nS^[`7::scd5nˬ4ĖRrт]_kкj6*JZ+cݬTr𗸽*&l2SJܡJ:E;IR9-SKq( Uކ : ŭߵ9~kY_mןF]aeCYЍI܅*^¼f95 Q]>b1J ЗQi,)  ¼M֫4:3%Sb. X-iKDV5exu? Pa^W*Q,.Pa^WPftX9$Fa^W 1k2EEwz4e rQݮ\Ra^WPI[I.-D`soS#KYB]ɦlRtZIVN<ڼ4*j^ 7]}b W3fA2'6yrq̫Rm^WҼ.,Vgވc4~I0rءSi^W& G*mqaܥy/e'U30rLO.ī[f M/2-Z`%*+&߾dϲb٣,Գ1),+«5wrkTJ(fH?Y#' TUxYQW&dZuy}}쀛: 0 wZ,pqҍ*{`0+*e`h@ָu\>ڣβ2ǣB;u(2,|*Jr-o?;Gecg Z>:+<*lLx7{;MJ#!W9 &k]۴5w2#àJgjwk+THLTMbR/*N:Rgj 6zSP58i1qجJ8qQĜHʆ:x^O8Qf&\pϪ L0l:,41PgEA96h)[uyasTjLp> TmUH,/,[V^LJeyf8u@ڨӫXA 2y`h溩20fwQ^W#8rSn0;.W&r.˖-3E5x*eKe*aZ7xOTW(+r4(+"L@MA;F8bm@UŚ2+Defl"|)十HUU";$(g _A~)"č 9/JBм<꼮wy]̄5BfEyŤy]<{g2XU)zi+2:+xm:F}vUMީBn:(ZBQN]qGUBhomQQѕzW}P~T6 IQue?^ <*b-PPTnaUB ԥ3ۂH*yW:sT]{w@UE'e{@L30쟪*΋@Y %^1b,K׳8/̀+빥ocr煽q%ΆbpQ M [ 5Teo^6]j5f^ݮ޴]zUF_f$+m2Xwx+_CMc_lH/Sge*&Nh.!* yEJ^r!%UוʪLIfv$$N!*ގ"AҤFNUF^%Ez]I(ҫH/rQוrE}u/VeR Ez6Qו4"@k. 4Ez5zkQ7(JQQר0Qh=[Σ\,|*,2[gB|^5OEyS;ʣQQ1;!?T*_ީUupș(^Ez7?#Ez](qJ.G^nzaH+E5z=d1تVݥv؈FB #AA|EqE|h8*pوRG͸A\y=@団qG]9Kwy^pTĈrztud/[|Ѕ(rCT8 jvQ+2_>$ap0zsyUƊ/ [>/`˳>b'tyz]Q=^aXY@ *+ < @n HoU"YׅD XWuؘ L\G!yVcwE\. TH}SLP փkgy^~+*3ջZ5w{cjdJ{UިTlYTAW :"`1Sa⼎:+$9N+O] 8tۡtj<ܕ.;(t8G),VRT7Gz # BR#kCy]AjGDS9ы1_ F1[TWTWV'Xy]TDёOXny]TBW@]וD.;XZ b9.qeU51TU #v =h", , , J >s<[I@JbE[X;Ҥ*{M?DR[Zv*{ ]ޤ+0[R{ v-E{\m$sd}O2I>\Wd}Pd}}Rd.'$s_d}O2IJy-$s<'$sduI>'$sd}O2I>'$s%<\O2_/ۗ~||K]x]񛗌k^qVwO/?~>w/u/_굫RY|Tb]laP&% nc(~Mve(>Ck8CfteS)qyc*OP1'+☮5%-)͕M33teR)Yo5,]ٮ! 
RJPR(ffq?]JєB1Y?W*Xj&}Xo؝dg.eR]It*5{԰ ߺC8L5ZpQb2m'w.T T Wsټ&;jcF`*-1)ەhM_Jנi~d$\+fMG] j] խhR$4R<2a8-RLٮUcP+Sݶy ߕ乲d*X ʢF킙D͕x+}B)Íá`J]R\Iٶ)ek|9VsZ$ BCu_ޏ1l\1qH f~M`owv%bmԲK*eIWe*XfW;?GzH5/}_Jӻ,eS/.\1:f/!`JR&10&q{Nqe:if<ַK b HSGkBœ[ccTcX=4MȦG0Q31 {6L.._;0^BN; e}d =7/X9hݓ+| /Shp^XgԶk9ǶV?ʛ_9܈ eRyMpt*@LiTpTi ~6p Sر3LPSl<2F|,w[Qp_p3G!%u*l*4ףB;?9L:|&s^l sW_9,=d;!J)R:sz9MFa Tl&Nre낦uvӜS {6V9{4&qݸ_΍Mo7٤Ī.`K6[lWJЂ+ &˅yU ﯲՙfjsX4W|{Rhf':4l*up-BUsq)erI-,^ԓ+q*.e{Ο-J([^D|쭄8p\ b¾hZ\J嚿4mc#p,$MfS6NڸsuoWh\-y+LIg:\4*=叹/aM`UV?nt\}sGL`¶UY7&OW!Kucȓ)_y"ko7^3l#:8c5yŶYa۰g<E.q6# ,XR*Zw p8M-'.@0y ШxTޜ)ȦP}]Y%ĉ6It*SpT }w&<9ForpemYLL|54k0:KH͹E}r)KsW]`GT>hRA' M >n }Zi_CԯizԤ90gZWtQgmŪc=+X"[ ؔvV;a ri52BI}FyC-c>{[F[2hQS레g1Հ"6EyzXN/}1зo?+'s_!O;*0cI+ MHN]JzA ,6#;sT-Y:6Sh{-!IG#4m/EPS:рc5eb9\27P,A֎ͱ5@\al:_mҼ`:)sx*lMaoo ,F0hZ笱xbJ?L-c<Φgwހ99bݰ3cW0Sj{۪oL~\)CTVlm;WW;T}[uɂ$7S83T?U. kh0m %`[\[x1|3R@.s ]bɇl^K~fSpȃ7ea2k3(i>NEL3L}f`WiaFޱT#o'A/ըqʝ42,Mطrŷ61;3%flT60=% L[_S2 θ67ָZ [dК%\5Ȁh[h%`@^pۥ7֡Lȡ ,~ 废-=AcJzhAsWrw{E>ߖ3| :8 kB : vg˱,}9bt>akĚ|M) 㛋kE)~?k+77&٥=e( E~mіqA=t5)T >d=rM~ɚoFxYv+q3Ћ7Zuŷh n GTp; Sv&7P|4"4<a$r IR$C%*Sw/c7!BA6je A"rɡ>*\#tdJ'Ź42WWpT<lJQ7*㍷oz-˅"$P SflL{YezQƄݜ/, E6C=a,̬^d\[o XṼDF6?[c27æP|Čy\`C/\q_q )4 Ngj5$7=^լTSf̀y)>?P29mwɎ :ht-KW<'}jcJ)އ&b'nܻZ1l[/Iw@HFF/#wɨt骳ǔVsKt ijn'@Wnn/0v6yꚸRBq/\ c]h/rk-~&]trwK [R͝;vFގۡ#yn$)} P[@ޑ&T|+nONi!U`ZM Zo~Trcb5/j:0XдaBY⇒)4\^j5U\ץ?}+o ͓KoOY5O,JF'Wxg\Ώ|d*Nk/w ݹ;?beSNm8da/͛a͙ZtԘ 7% lљYJi|UJ>Ì aJat6^=0Btᤅ K+1+|%cV*EC؅ #pU4+^x@a7(u2t]YK) O͸XO8;ГF9a)K @Cu ~ãJYQ~ϴخeXH:=%48ނE8( 2Ee/|+$:q6bdG}29)8 a`aTA"-s/MԴ7[4ظJ>h sd0'^"& d,)aJ{bj02Gh1u.5As#PۢT&Ydceu ?ߔ@IFA Rm( 7^)) %4v$C@2j)jwb2Pn(7]CCI"K?rAϓ4b@{W7eQa<4v U"jw`\6kS !\fLt z!ᾊU`X~#U3fP@5D;zXhk5ٖXwJ¸2+@ԍgfzVŎk#=FC6XqSz(M ;pEroVpiژ;Axֆ8^R̼2V(RWM[#ZVI* "le$h}_}/N3 ҿiד ?Y"(o"P(_1#ސpi㵫 m<46Bu}0X]ѦpTc&OGB|lG٭P=:B,CY7<)/(>s[,MUrm/)Kg( Q.!oS׷eznu)3܅ܨ ]>#}1iPPKy(pe0DbQ62-6b*#`;͈,3n,N\ akJ@!ʛIv?DǑ䁇!?)3obbo N`}.3yu9d`r>r֦+?A +*#IAc։+9rDrB<48cU1ϥqUz87K #(JCb}e14 I{7ZqNn5n$aKWRL2Ƃ%3%aE\*#1StEgAJ7:`MtF&VNXz 
>AI5t$}RmIn<"7wWZqZVRc}ICZDRշ@:FnF#Mˆ80>UC^a433vTlAeMj+1[ELۋd̒1{;k#nױ='AL 2zVrs@`rMFXaBn=sڛb<9()JFFSf0V*˘rfM2yYwW8'/aϕNb7(3|Օ`&P2f^$L0Dqx0.}aI\̜{S䲂6eI(~'C>=Z{ ke~b #3\QQߦrwDQ?;P`MPҀTc>).L #JsIQOb[z^:h>aS/j`<~T=*2J73GLx 7X8рX%@)R|xWS.81O2Ȯe]o<z.x \CbD(1iOFϙ1" /3])78@Z /Js\P2 >h,FnUd]R#fiǢMJ&P0K2}h(3Ec]yYc5fv^b^0kDeZD; i. V1 "rQi|LX.nj̍/7J4DwU  z1vJh (A13%.wkK XZ$TIފMPTXGG_Eu?揥"J7SeBW` "ɍ[i[[@}l"`՚m8 t[BaNӭP*}e{zKe[AN~z?&-pJ[OWJ! W)d1#M9 %G&Q|NF) ‡T27 HnSwL r6Xx!TMq7aT0Loz%kԸ_&2z"sG<]7>zzj]a3 F= KIj"F#l;M:J% RE_YPؙΕٳ\S*d)ȉ#mڎk"'U\kd2x(SJT|; Ge|%>'st˓}ҹ/O:I>'ʓ}ҹO:I>'st}ҹO:Ist}yҹO:I>'st.'st}ҹt˓΅s<\)O:_?fU>, _W^c}oO}[ڻ=~?(ogl?__?oJך2/ʇ/]y'k k>;-Sw_|ծl0_#s ]ǯ?~w?|݌iWLƸ_/_Ii~/~cӛi??|~6\[?tt3t}=|e`\Vm[~z+[LJ?N-Ï<]_r9sr/Kߎ7]q`[w﮿|@AJjEX{g?r῏!qu#"~&Fk_a6\иkez Ev~Î7Ed\ϳ9̇7?~^Y[n^m:}CW~x)o{}_O??募w2G_{W_ዷo_3{p_ɿ+?-էN߾4ջUcq-?G`Hh~cӏ^}ߺ.~5D5})vɗǯ̖KJRGɍ^ɵJZ/ﯥx]os:^z:~` _[~ߣ]r=7Bc1BZ\_;_}s4kL|M_ ?+\ZϏϮ?m?~=߻O W>wof.=tY+}_voϊ`vm2F| :g>$GpSendstream endobj 610 0 obj << /Filter /FlateDecode /Length 3678 >> stream xZَ}W4f0Ծvp1cA dÙeDR9ުh"DoauXu~өAw-v34]k뾾VАUL..8dݬn~vb7>`pf- NCnn_1fujPFrտg>~'T6ӳB˻|rƥ| ]>R\OWgX?Te`_ORQ*c^~$9|u׃qU 3HvuK S7Ru(/B!G~ݷVƒ\ P"x LR™*^2=(zh@ (qZ8g;){Rp`(J}ؘȀ\Y1H:j(58Pd h#eݽEF )$ $P 2{d UT"(M &lplANABNIkAD'M>! 
H ,) * `!pe : @d`r鈈)|"ܒIGk $.MRHaA c, 1@D6uF8fDr<%Liv6i(&#"a T x++F#[$w32%2 =IY yPD}ȇx)@D{8o`CTDOJu1TqV()2 "Fj6XĒh Y2QGr+HR IA+΁Y(a+ Gj!$% 1iTWP#2]RQ*7R֗|tD[p}jĂSpo *2 T0t)DNGiҔv5ʗJ֦s^Š({v !S3mW0l=s!SGEAdggRU#@uҮF2Q LEJhS>tSEE-.ݍfp VfT @آ/i",-1(S3s)3BM he91($y%da rKi7ipOiC 㺣SAL6zh1;҇Zf'*lPy5(REFR %( KUW)NGkmBjRޑbHF_0ȣGRV zrH@)iEbeXd"Cj\0Y#(ilj;|QƒNT$p5 QEGYDhr p`#nDm%\",Q@ ؎')xYt"/PE ](bjc0铼XGCjk dqwF hMS(Dz]#&E])\!1jL6$`y6 :D$T~kՀl=OiƠ6T*KVI¿a7"Q}PR} gZR<`Yҫو`Șzь˚NuGPx$>YzqM|M/ޭ- j{yy{\0Y%biS߭ђ+6cr_Kﳛ(ہ[-3YY4MBHcB71|çӄPq# ,_a$SAOiCc]^Y?rD7woWs~'Ne)-=/dw9ha>7+iAl79Sus}r?Lxt}Sc-p!Eֈ Dr|jl{=l "ήZPܟ6a_k-4͌J/Tv_4|y{nqVF,CǏ%A.vB8?a;DDzJZI) +L"Ebu/~{6zWN{]_-ߏ&NqZ~tA)p̀rhA Qmz<4{ݜ嶉$)zy3`@l9ߋc쌁ea~O߭F44g}ncn7Շvvy:jWETa4i=q=Jp(g+m$Rl0Ql|s Uj#`cm1B_o(8E<.+J`?M#y,6_Dk)iɼ">+ TI=9)pVTS6]h_m)ﱅ~ʚ~Lv]ߟNļyryA=&e? c8d$pp1fz K)E'_֟/JcT?CyrUy35ox֔d8DsiPsnn^#O߼]^ ebǑ=CI4Υ?Xvhr!t^tE!jpj,2?7uo:__aS#/K; Q(=u"dD f17=vlSfLs/@Vkodl2 p/sMH2> stream xVKo6X,zZKS"q$Mqʡ@kdKS#Ht@zZN^j|.q@I˫킴'ɗ/ Vd YN2QX<<1(Eљ,XDB7O Q^1,$ڶ<\RPݔG=(\V 0opW 0h^4!D:񌍂k2;B]í[Jt7X2 FeY;\Ŀ 8T {] ¼=0QB\&sdsG+0%A9#Yy8k Wt^߶DuC0C@ؠM:) }Ȋ/ߧIcͻwm]qGnsTٟXNގ>sMus=S8p\;IωǺquKuV"-82:mӺW̰yBl/MVc_\%FU q4jz]mV\TǬJ6(7:m%RԻy'AqIo# K,LSx:qiQge6۬N(e p$ڕ}Ow>z=Dlk:{Ŝ|@cvc׸aix. l:MT0P|8^OܤVʔ߁(n_w.:dn( H%A1m;;N }" :" Ma*NE$R<_)#*X̔"p;T mb RroŹ]xpGlЮ*~6PEK[(ң@? P.wv,p]dE[F}W.Y/M*fBtmE2RhBu#) u??.y37fkf(MD@O O߽xv5fPg8Y$e T;r9Sieɶ Lg s weeeN8 {k Cs0Rp,=.'>0(ϳ =j> stream xX[o6~02j4^D,S4ٚI!ރj;:Jmi~/I[nl!}!Nw^ fv@Wg$0H,pz=0kȐ4Xs'\$p9 62ZF s&TJT֗h&iq~)6č^LݓgI7{$KP.`}g޶ue{@*gPm n~t_HTbtV,%-:$9 ֻD" ֖N'P:RVmd4YRHg5i!ӗW)hj@[ 3:4APj(PS6:4MUiT>Hl&'hRiW):dD $Ǥ*4Gw>kedr:E-D8ۄb][Ro.$ԇv2p'B^-ƛWMgj')WV;1Th/}ƱYa6JaꄮXS([U:IMq9 j:2&=tAii:~^n$0I4 I|ZRoǤ/D 9|=O.m/8_x_- _)4{m0`6n|At|(jͱ#Q4o6Nӥ>]{nS6M3{tks/5DaDzmEj:!uupgh޹ rވשn>m>S-D|[B՛bHXv,O11r\8~p_տ(͍2IyO:ѕ G@e FÙH HEn9')IB%yB݋)x. 
gi[έ}D9~/ MwX_}zArHzkKkeK*+i.H?.هuIGEtp?kendstream endobj 613 0 obj << /Filter /FlateDecode /Length 4012 >> stream x[Ys7aߌCS$%V%)Jt$*WE\ nt}u(1/;]ٯgทL;2 sޓŴ7^N Z(ʘ!hŇDR+;ڎgpK)IlVyԂ\g'߷rY1 [x=v%mo/g*b$I \&ZSTSMu3ewif.y :&l>S͵s.45g7tXG 0Y5^5λ*B B*i"w6D0⧠" 䆱v*X{*x*c-2g*ݐB䀠Nȃ뤯 ES$aTs6S3jK'\H*I\`t><;6Zʅd-<->O<@|H*/PRccCljLh?<{jKGN*`3W >^ 9U0y,lq ET zwE! ܽ^5xڀGnykGNg.{䂹+y XQJNz9P\"s*^tޫN*+RZT"B%Ⱦ^MLs})uM n۪RU7VNB_Գ^G~ s2dׄfơj,53bһ TL4nG$J53xӞ }hY'ϠvĩI;/SYa j'5z26I;vfY PBh mVes)|Kf]R {;jSTfP&X{9 m*?߬]C"W^էG_r< OűpM(Ib0opxYz9`x6|[YDBw 2,|i?A^Sae&V~B{ l6)\;c'UZ 됩D&ywx=c1R>_C}ie[ݔɾu.h#StOIY1'LT\{,dkGEa1}b X( ^WjC*:I̪Z:wX K%cMRm!79'܊*Tf ˏ_~;۞Af:lT{; &cLg\%U&8HuףC{9+cl.T#ݞA:|30K=:v #_L<:FLƃ)'5,~=dC2{φ]Rۊ੝d.dARYPLuRJ g]#]t媖Ys-\o*9'TiuM»N{AȦlw;~"mu|C-б¹߱ H&]h˅uܚ:nGIs_7W>vAjb|mFh+&Z{(Y#AΥn[\RaՈMS˝dye%NQr}z(p+xlɟ&y.,J l߽l>=Ï:X7ͻ M@sE߆5چjW6%07E?v˓fMM䲿v|緝J!6v_ (tL{hž.#Y.xt|x dĻկ_apo 1H & +9f.NVwڟtlJوKO\x pm;%7X\Q5# Oݼ+nnW&t&X_|!txC0NCkpPA^;%s=i?Hg!@^nnVzFH:ݤytqʋk Af)<FZT 7䅱p8q̳,}&; E)궈xcSXNy7 %$1or!$/0í;e20/?0"d'%su ^<0sU`$;[ID 9V^Iץ+h]bGRZSLϧ|xP"X8b7}谫sm!BYIP/ǼhsțG{ 2Ze> stream x]M ?њ6vMa!Eoa.x/Mvٕ'0X7X-h=ֱ4AzR%M`v gY_(݋<╆B6Hn/T@gMYKF[Sa(!3IB"撄G,$ I?P1ޙ-p+2hoM~(fWendstream endobj 615 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1035 >> stream x]_L[Umur),^ 5"KS8(e8RЖ u ]` "Vn_Q8挦2LK2oc9I|?(Jr'C&)>-%UjSF qآŏ]HMjle6RS3hfse-nfgsnhcsNb5;Y-&KFpkFCB$Wjh=QTPCBvEp*FդAmbC"j IOxqC-ئ$$ZYce2렴3U5 MSY#Xa-4?15wi üo &W\,g¿PVF"MI+ء&&*h=V|Aҳ6;aoY2aop`D8KV'bx}FLfzZ:\x.F)l#3_uA>$FD %YDXTm<$g4|^ys 75'N_z(lmo?ui|l DAAi߻*>ho{{s f~QB&x>FoT9桥nks-2dO;}~愿g)8LU՗>LP˻hzG/208$K]@az!U)\wV;BkJX0/;-A 84H:|_oUgDݗVobub؅Ux(A$s:kz t~|37`` x)~Չ~O'"#sxM`jɘ Zit ty똊dV5@Vj݃Ee<{rL@X*4ۃoOM7zW'pًendstream endobj 616 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 X ]Z2D! 
}I$[.%Mů Fa:8 dw >?LY=4R> stream xcd`ab`dd M3 JM/I, f!CGO]nnW}G1<=9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C53000201012~_)Y{ӏN3~_Gئvkb_ޝ+ڝ͑¾lzٳTR> 7s-<{ 6qb \;wq {'O vVendstream endobj 618 0 obj << /Filter /FlateDecode /Length 326 >> stream x]1n@E{N X%4ӸH%,Ea@}|)Rxz=MҖԯ:֘.)+ xi8ϒr|;T|jY(}.mLk;Sք`0X_Wt*f/Ƅ ֎{c{50j PR&2X9DX$^$(Q#JE"H+]39Fcѱ7;$!~2i6 WFinQ)lFF FinQa=ox[4m|0|)e^|Ud"Dendstream endobj 619 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3239 >> stream xW TSW־!{k7c}N㣭RmQ@Q4 G Q#NA($UP@LijaƎVVkaVtvͺYY+7}}J(g'J"Lٰq&^|HUZ9G.g8OJ~4ܤ|n6=+)DU$KQE*? rE>ŚEeD&#%.FLأXqM$(#(&J Z9HՐy~|)K\>-=C'2*P:cEJRMl* Byj+J:EʟzZODP+2*$) D-S\s..h9Ƹ1źnrxIWnww型@yExZiB2hN6GVnE, c-c[lTuN-ĩS,JC藴ͣ-o YUZ$(m8*iN^{Qy=u{`vOS"޹jaT"HkS_JGu9ڌy ^ˍ1Fe6kڕ\Zddh5|Q ; af~lOm6 Uo_]C5Q';<ċ 'ˏŷTnL dm<T7U:o!^+o|L9d@ XXXʹ')V$"[eIE* {ZcqUꪤ50k츉e&Θ*(oZlN/P7-1$M5qUIi|T^_n#̲Cֽ,Y>> `>O3!b4,:+ˣxrvYmvD<8^s4Y> WUQ6hhYuL2na翥s{'&: j#+niB*RO (6\ NrA`Nc5BeH.ByU!\5KdBmZ[#D_Oum/-+<ϡe-|@ݒq&4-q68K.QcbtH})\eȦ-Pc; @֧vA4$K@-TG9y[֕B)6A3߼NT0>PQ#tzHȂC: U`d'2bM0HНpj4αʳJ+^jE>Ə%X<죦$+b]c>X PT^Tv/:Xj0л3EƇjk8TV9)zn¾01&6 pȏ&2v&﫷teu}^[g +n;[L@>ѯ*X"Vv{\űk-֡+<^󆜵_kǮsZ8=3kEuH#+qq{NL{D3ef-a9y#KRDZ\z AdSJ{V=z&kں{kNA'N)mMG"ymҒ͖fIUS;4I^\AI "O_F[g8Z _4MmG:{u307`~*c|ʰBMٻWˎ! 0pGd=Z-öIɓ ;#:" 9N@ak(<|_e75Kzu $~%oMiOIoJimmjr`rx@B$D,UgvBKf&EhaVՆ<{g¿“$Y$5#Aww^5is .GXB[-:ρ 9R;`OOVzQ?i>A b GGnנ9a=#*vNJ͖7#N.` ?2!sYS 笘OE$)Ov2yKTVbW"CxH}>?=Va(ҕ=! 
c:LxJ0FcA<4ΟL' zRKd~w2)Ƽ+01,V-*bKq"^[42ޔZ*/;,7 h<-wAoda;r@+0_ 9L|Sfy 빹-DaÊ=KXy $ JB[RoJ(48W`cMF\7fp\Tlq{ endstream endobj 620 0 obj << /Filter /FlateDecode /Length 204 >> stream x]A EInHâ`]x{işa>š?ϲp·!kz'ODYy%v4QODIt ͈*GӀh,&(:t+X:f)EStRIXka,†(2!,–7Kz[i)ac)N1$I|άeendstream endobj 621 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1182 >> stream x]SmLg)*|ͨLSf@PΉ 孶ji9 loy)mR`#4˔iG4.d2?<ܗ]Ó<ñ X:Ң/1g AX('I2HJ 1Dhښp|Wš jVVgdHjʬdht,W2%2fKf^&夡Y}V0EuƂBfsvu~z0LQm.bXc{&lFHjD > $Ml#7G<)Ta?RpC:9mt~懋ęH6EAB\+T037uP=P>~ 7Ui|A Ui4pQfR t{]ͪof=MNb^ jDsZ)u01@Z\H6Z\L Jx'E!@,U5G+^y=,^!%#FՀB'۟Lb5Y:SᶓՃh1)ʞ0)Tځ @sFj4UdW\'yadBPGry #f9A#.KeWڵM*w qO:p:':<~{k|i{NZ(Iu9@:l1MNO(yZksnON{>DŽC.wI1 '"dwjGyTD`b>,-MKve)=}EM TEnoܥ7(DGx'7%Ny-GHUl;U"KH?goм+/!)䡟&aؓWf'QQУl"k{]5{+/eƵk"ijnz_/ᐃR淙$xΧ+C|-ۤ" Bv(\WTqy5lz[HYuUoI!m hmwKψͤf$I^ \.JJnhw] '°ab=Gendstream endobj 622 0 obj << /Filter /FlateDecode /Length 1572 >> stream xWIoFst鰕YIH] .ER]ILH:WprVYe^OOX~דOW>[N \/$f)XNCr?D?O}o襇},XHDUQ2o%"B}ż miZ 1o,??Hd Żxp'QPyz)ZԱ!~Ct^6?W_8gT/ [%&ZpN &(%z+K1owRdPnoݚ =}K:]>Y~=TXr+sc$1D9̡b~̟*#5й`帢Y[nYk 2@" 2biq9kAlz"1)CϪz:Gv߯ZZV]TN: \*K/4 !qm+IUTYڝp?+]GjUUg} };\k]ꨏdmYfHx]ϯ9|oށ Ÿq1gw>mpwA 4,jÓmwx]:?\S:ʨn㍾nB,.:HB ПyЏ/zDOijQ1J0F[seǂRnK .|̬ ؠuD0G (q\ڔ~q{ ?^'c82?0sTNz7ϦPįendstream endobj 623 0 obj << /Filter /FlateDecode /Length 2218 >> stream xYs=Aoiδi.O"$q.Pd_|bw]!ؐڿl3÷l3a2x^e-p9lh0WyV5l䳥"fL)w82yAv0:+`HfN܎hF-HYmӧɿpEe⹎j1'M%|;K2J5G3 aFxB3y)HQeRsHsѢ1& ^4c Ɔc238h8%샄%}p?WSI_mIwbYp<ѡ4LSQx]>|g rgmf#a >)rZS#);B ǭ,~}?3* X0;Ϋbm<agHShqXVPMyU[*jH_H|(|a#CGgoEo7WuND7T<aBV|uݓ?rݟ-?aӥ,/ 98^O?܁^|τMrȡUO3Hwn"h7)& <%nIz +\jHU_(c%*pHMF267  ʓ0)"dBۋnbrɚrnӾ`[_G)be~'}C2VdIUQlp Ju@`LP͓0oXgA`6 )@ēVb<"Ĉ>aUI-y!Q#Q "rsHsxZ>Ԫ@ h/;ty7t =.jإnGoWz!_b76crXzrǛ%풊^֭ "B7/9ɭ+p2Z lIL*&b[94c?8Bb*=/2+kQgLPTuyH{e*\N]:ZVgDU7`#$hQmJ7Ҥ.r:Eϛko0R[>UoN:_{.uy͜AUɳ]#+ ̺3tl .q6g*pDqx\>^d{KPlg)@5 W)|a)1a T)0ߊtqqvu2YRDKKrV?endstream endobj 624 0 obj << /Filter /FlateDecode /Length 2643 >> stream xXnVKh ԩ4 "(d_WkkSv ;"NZ0 -?|<<z%u_uiFg러GϵdVY]٣{*GW\|%"KkوoJ*oVY], -LAlK³U> 
^YlәpsZe%/Vr*8T'oxJϴ'rH$Z1\ 8z#.'*⯣3s1Y=xd7F~ *N nc.W0v;[uxeEJ&ݛԩ>M]YU; _uVeuֆ$}NAF_u֥o0 JHp %|-M(H>QQB!$h2$FBELHQᓴ#T4X57*JGjuStƠ@*&*Cb$Uw=`Fe,klc"!͖41+GMWE%=#n Hp1@:0JbMMxt/iFt61N aB6 @$IC |0i0P e)cWCc 33h:1W"yLq>}S [D҈U: - QÈ@e$ 1bcZ!YoA)!Es/ "qjEC{NS4,۸^wGTDz|\6 sR꼥t3.Y!ka&?Cb)Ҽ8 qQAB$U "kx5㊑cp|4@>dHuD"5z[ Li2\Sumo Um8,i}C qB 1h+FU#s9%P,}^CTMZ4M88 5mEDI(k4 !:;qE]%9GH]j c Bi[*U[NաM;qcWX>"sݗm^Ǥ*W4Mœ>IH^(H*x F> Պx˹$ׁ'Cg;M^qKg 4 לV#J(el+\ЌwC~#31s>{%?X/H1=ܾyǯqo 6D/QcpY!Hc{' /^"_-?W3D6] .kc L%zRMr^W܊W7(ktS#,qx"$f>XzCZ8;wu/3\k78|ӑuυgmf7^Nx~MOXc涍SXcp\ɫ ݓltqbqL` tveo5.tgޭnsr#NΠm1VnŻYMy5~}BˣVyv&$kf*O4s}KJ+y{UWmջFwsl:%.2((kwh A(YOxX O<qJny6aWx^HC'fGƳa_0OwX 5_ /Ek RtM {|16c&x/ՠŬDs#dsp=/ZKb/7], sfRS0*%Cjz=[ߟAߪ $JC+X(%6gfRi?~5%rK;c8*TԤGv2gg&0!Ie*}7vLmPJ(ϴE3{l)z13 ocDY-Ӝw)izOQ[~1>N,UmY?endstream endobj 625 0 obj << /Filter /FlateDecode /Length 1490 >> stream xXKoFWtUH IH^h]@=H3Jb"}{Yii0ɧ'L9je:tzX:-鴐El7R PZ(#9TӌWYhS%٘3N5yY.yAsM O_Q<k׊|>f R`$B"ۈ7JT%v;9ɭwF38"5ΐpJr4rIl1͂G\fR6 w? Pɶ{i~uδ$?,=}<]Dfh`1~:ɹD1 %hxvx6#߱o)sm|.c%)$F#iePw5nNyٽݓ(7sG޻c6ձ 1H :1dΘ>P5Mum[cHc$)-ͅDz),r[vST{/M63?aP7/!1API}?V]z R/ɠ>mn7?Mo=u]&0KrCo&CY BhѪ/G*JH,Cb[H%:> R'oTqFDvhxoUz_1|N8&j̚$b:]xV |R$KҺ P >X npzS%C xKUbHd]uֱ E6",V=DVo(cxO?Ak9TbJڪ}=U}MuP\ _dnj\xyd%sv.dws(o44w:yla1C m}ҎξЍ I4-)ߐhG,ZUVl0 *Llaܡbeݰ:nhAɶ#c+\CxZ[E%bmtzX/.Po_FE>`p =!D 0܆})Aq෮$;1O—/)'2g* $duDλYM,rUk0!rPcZ"/?tdmendstream endobj 626 0 obj << /Filter /FlateDecode /Length 2839 >> stream xZ[o~ż-UDܢEa7b;UMgqtυ<׏|窩yп~Tg<<⟫7?rY;W\Z7^~vɾ󅔢^Wn|!h3f^6ڱ0F9=[]oͳ(ivQ҂U7a]ai_-9٨{<\=8c/Eb ~鬇g'ZrM'71_76 V~q=c/AtB)^-14B E&jX߼fۏ~z>Zϴ6.fd.Uenh>Uv.Wj85XV;Yi.*6 ^J(v7UiRglw1B1KCw߇ubTvu3nA&3g7\j=Ц8(]bE5gm&uӕbqU9|Nۘx$ 0F\ؔ~zcvdo#i$, 0eJ) km 6u̳a/iV"P >"JM J0DQJ!- 3flL`]5n2[Sd׫ܫheopmz *ObHB[Jj 袹fr^Qӵe9_Y7M(9/7.@j-vffuj$,-5rQQ,j]Q +Š56y[>3Y1`fHjn 7jӇ orF9#Ao21TUcSB*ԿX('j0i&H8A0< ~)Q}_*.,*Wn-#r?JhN5aa TZ/TO >/.>ȧ]JFe2_,n)6lدփX:=d OGDR4P!lc(þ;mI"6AөI˨>ȅG GXXZ6vBNSe @1ٖ^I<in)a0`I>"j"#B,<9F*Ww{١"F~EO"Q'EXޡ(&@z ZF kx B5{18}p4 oc3ќœ,+ 0;uQ;%Kz 
K]Xb;bsҪyK8Qo#aRS>{EU0p(@%5]X;n\jG=",)õI.ӝ}Ф(ePKT;c&0ye?Dv#Ɲup/1I2Yݣ/.mV2 mM3+:@T:{1'm lEHb*)q|V_|=9ğ8uA.#Q|e?yhlK qvLdd&{%Jk`Z2)C?(<|lKiGc5]mG~|:?~`3\d}1MG3($ kCcDzsp`/TmOOw:$kU$?;iqG/m{v]'2&t۟<vv}% |tJlpezu b\NӑbN_~RQ6vV)BOљp0&=N нS.P@|xͽ1ý/~yC.t#P?qk/pZ_@26Hԁ]哜`,~'i'XM(əkiz@~f(G[/;)_U|/)~w5> $.7endstream endobj 627 0 obj << /Filter /FlateDecode /Length 3459 >> stream xZY~oK/FKs(Q|-T*hI-! V~}"-=,1cqZtZj=)דFzɟ_RPaKK&~:RzBx9xԂg[(3.K3d~}1UXIfeQJiiIUoO\#(^7q\lT)0l.,REoFrTP i8L~gQ!!;f0SC״(m&yn4-9Q 2K&`C\58ɓg*5(ŭT+p4m ?ߵu'U`pl h$ٮƘRA'Q#nъBPmW""$.HIiҭDQ|JwmƠeJŅAHAZg>QKiQAސꦧǛB|MVe˞ZD*@%lCA ۾?/4`K|U߷GQSp-Bz͐r 4SjR9lC6}nWi$dUJV cS8 gXnR0bx9Ӑ;xQ BsBZ\q7GqFn""]?wA/VB T6hBHSڸ!a޳wa6]dј!3*G,zQ8Lrz"g&_LPB s Wo!#j؜s,˸ӧ{o q by dpjE]{kɭ;1 @uSYse*,)\&L.s*5~[ԬT(o 6]qۺi{!A qP tn 31Xc1ƎEOz`bѻQՇ 㳜~[ *N M7=J/uGozVdwuYC1`1(!i zR:)iqvKQ({BQ"wy=m<Ԙ5sZjbz0T8YPplֻ̎]"@VY, '{ā*AIk|.-5Y .aڸ -St@S"^ZUJ4]Wmi\ڐdDE*-Qnr47 j Pwա kmg>$Όܹ?#pn,/ yH 0B>3ǨH`B?5]yWs,H@vvl[oOu;ȣ`gh5ђå@B!@nLϩ"*!uF% oY";w@a@OV o\-%C;Ot4v({N7P8Oߺ>)q/Z/q+]< M{D?y&2 }RG-~?YHLGDZ{x=Ԑp :')C׶Upb҅4fpi;:ǔZ%cnHy/Hcs=v_`9>Ci)!$DKLLYX1BT M;iqwîqW $%/,^5Qw/k#:. `R#,^0Hg.[kqg-$G6腁pNJ;e϶Yf~[orPrB^|{8Fg 'g;Rԡ>+@l𫃴e!-of#("Ĉha pcma:0_Q7䇗/|{WV:p+B^1Z˼kt!isRNU]dƺFƊǻlpT}<MXNm9xȨ孋6 X 6cGTNJ#H 38G#uo\!gD(BY^m.\yp{O`!+i=onb櫓FrЫXC%)U I)bzB(Hצ4Hѧ|>/\R*ShBb񫧁P*ąх9fFY$ic@4Fg=Jg(&vQ9PQpa*)"ޏ2}B$E_n?XbH#,@|6[JF[)9,@EǛJBEDBB%~]Y7u\8ۗ#!6Q8iP!kd4DSGn^ lRCrs 5gÊO-#S,,Fh? 
($|P_ǜZ.6qb|{kNMw26~tQ[`Q#}6`V v7k [ȷSSA= )Ԛum> stream xYKs봹R0̊֏Ng*`S\LI'g^rZNg+~;%8Seύ̬ R/qiD-eYmwU-A+ͪ]ሄhKg|7au[mc;oa.#}Hz]?kvn@O<)a'-ŏE6eT^,Ӥvk>R}t6"c_VUt[qgiPhz#/\\* >^X*C_l4|Ovl$2 )|_\7(vs;Ea0naX*]e;ɚi4V6' \N#v*<ĥ`cT8+,KS^!0\uaX%dߝ7-7 urT[V(w6n9I'$ٯ]6J4L48 c+?Ovҩ<8 A%+gF侶,ȓS 3f&Ea0p[O&h&(2LQj#f6$F+Y!Opt%3Xq Cڄ[X9n"ȁʯ1cZ1NbfXO)eqEίws J(Di}ɾL&{`,E䰪|=}\L{W:~GaUm I nQ@?|VHSS8ge3L(m݌"\o1꾨HsR }܏44Tj7O3$~a#dSIIC:Jm@(96T].ɝVƅPO6MM b@ǔͨsYVi7>>_h(,M ՃԊ@BY.V2$bo|ƫԓH.!q Qg~3R W1%ζ,PaxhYTƘyj‡qDɄIec O#AĻöݐa_a4mh8cѮcAX #G(?(Ie'*5wo]B 料Ka3y1Yp5OiNjvnu/@h25X_Hb*DLE XN f 2{ `qhk}A;edBeV0, a >B@<Kqմ#vXӠh㔻~!g c1 *ŽTk`>o; g4OSGd56)r2 72!)|K ,}eR.=O$Cz&_v @E4׉P ǃH2 :?Ca+Ñߺ~H??ٙffPu* Se4{a  kQt@>{O wZv~>܀"]eP;;jKGi*H$YjT.L0+gw3/xa۫@Op?L 2G]97>f[]A,>g;#P0یʺoKXN4 >] b~ 'ǎ4Gǒ 9( jco$Kyh d:pEW/z=?=nʦ$Q Ϥx1mZڰsfk8$m.0vD9"ϻ@C;Dͩ;p|oO$#EDo#-+/xuܲMFI' tݑy#*g K֧4!m4WܬiOci|2 _n1g߿H/endstream endobj 629 0 obj << /Filter /FlateDecode /Length 4900 >> stream x\sqY|pUbNC|Uq$ϱSNF83QROw ԇׇD_7J{=7gg~]?ůVe3ׇ-j0ʔV[Kg98:Xl~ɡWml>/.-<`]Vea-v7_iE <}I zO\ŇJJUV.Zպ.uv7!߇X.@YV5)ؚCOUL-2eaee=3mf%`EJ1#rqݿ^~? vV)hF08XRЫTJڊ1Vf+6u&bMm8F 8+Gn\qa]n)6(xZf`M:Gr't)fI-Wjp@^7|}8N c ,[ٲ}H'R%҉rݜ*`IShЌ< t0D]\Š't;I>*< x&X *Y)U`x)5/e53 pvd >`S9Ƿ9nZmK%2(PR7j3vZTHG? Wg?Ef,l]g#Afs v%PJ^0rS7+O+y qØ0GNӂw1)c/}G@vwDQQ)lhptf#`l_46^_fs9Q*5ˁ \#@cR3%)n KU#4oi/mj%UX ZXb_peMm@~8akDD h80 O!" N }s)%P:E= b5<䚂称}FÂw<;&o1?.֩fBtS羁` D롿8[zXR $z< pШN p/Mff[?Wt4q}2Cℱ }q4RWn1W8'{LNGn.gԥí% kCmКx. PO=7ؠ " AGBMB~}Jc028'j xɰa?aamD3*.M(vO7`@"Ze& I{3r"Z`81smNujS<L5B}Egq#h ťC7yh7."x>$iߜ:Xz9ڟ fɔ: O`sܯ'VҐk*n0A4D6FNq Di0ZLh7>ebHM;g+5=R:CdDkSʡCYv4\,? )^$T`gprٻ;7Lk9CKjG22;VŨwD`χ~9"vM&Hl)d9⿧S\ZRQKZ&di@h=Xi@?QV>*|_/dT4Qa lY/! !'"2DE44e /R1WPakOhtL pI{h3h3zrTJO _>t`ISW]`ngq)+YHip]?7X a?B-zRAI2 `R&}i9-T?NiH$0`PP(9N.x1E%[4K23?L%afHs-bzp~5߰iRJIwKHIA/k*`2 ^/U߂aas k:/RiϷT)4_HP5Yw-]AUgw!OeCYdCiq'}4qV=WqÀ˾2l{\\44V5n\o&i@ݤuɷh1g\:="#9Nr\bĺx89{`! 
b9_xyED"^x&#pim} tl3ДydJ=iJ_)/ 5 "43S>B'D3l&co7i2Vd`fEg<ۑ oX-\jPv'q$9H@#撮2ʫsK4*/,2ǨaxqNT?S ELO1$jcY.nj(~}۟T~ALn1~nuϞ@wR*`1X:6=֞A-ƄЄ=󰻸!#Q3ƕH-m88AjbO~6V&aʌWiVta66O0J U5#^ʙ -0?f(@V$*&\%kdaS'J 10e,XK W'4xÚ3Tk޽.ܛoTDMo-߀[oūߚwaabM~낑_ӹ6kKdfE.$\%{fֻa{c3#Lh9aT̬$ ~„.nmM0?tQB_ԥU> stream xnor(fk23d,f Z% EMJ;?j AdyEټaVgf̝ß7?2ʲn9΍2 5_=>7tSk9yha%%GZbVT&FZҮ_MU+96GDҪ>]bu-M\ԈR욇z<*Na&c fЍCauM# ٻtѐ]~Z ^\X*-}u;#.V? 83略̍5v05(d7{%hj5hPpA$59-yWQيB:BB ]pՠce"A9،xp9@Tl=9^֐StFH@XM:i($UJQrzT5 7VV#RNŁڮQRF&QLJ)8M$( (rEi+(Wfh][7P~!egHf$\L2<'GI.p9`%C9_2A&.ތunRd:]sH'95:HN<ZO$%2xrI5[ Pa o C#͍σ65#M(v b 㰖y_VuK@VA]tܥDR9X(/ǀI r&5'X0٥FDcM\knjj륆f7EԜ"b෡ prs-!| 0n0>X*."ͩH\8.:W~]6x[w_e9mH!B:"G=bS ha~+NVHO]O$9<ASԮ]m~i>1"d{7Md*isD-;|nwfyOݗvt%=XDUdysEQYo T깷K/ NoGG2jJoO=m2/y*0~AUix7 .-qjp+utp7]{ĂKb6n$QLsQAKKǍc2 3l du28D9յa~5d\*LM4hyLxPnC+]"fD%^&&0א.|T(55yI)pkp;䅤*'iC=8BMD}Nc`pB1rX/Y҇)Q<_ddrl)rp@rJ>?AIUJ pƒ-^m Sn#I,y8t񛖥<6;_C@J$/2U/RHU>+Ur97o%J@ϕI%^BswFA!>} ew4t `B1Xә*j:3 (OcU(S9HEm/ATI-;ПLaKJ-XgN{Ҡ rJ@~($'@9( .|$8HA%đyt,?\(B^-#"c`Χxsܙ D]Li'STX2AO!s)_6jg`6/T{Q1 ɪ,XgUÁ6˾ũS;uo8\h$go.Gڷ2kKWMݼ3U6/ n>f]CVF/^[ͻpAB *` դ U@HBt@ҹPL=E:K 2Y4Gr g@s! 1~W.kf<<5/ޱT3i.)8k>TbBzə.U;+M<+7ވѴf'.(0XMйY8S~ *W+hD7.-! 
_Wm\#=ث>Q3/Os+qT!S* G$`R!/Gꠅ&P!׈?$Y3>vQY>Q$]؃2{ yP5h:N9Q Є1]$\"{qЫ~[o_[`π9E0鸎~*~/O K67 A np?Gh&ɊK0(FKA~jud6v#1ʱy!8lЇ b5u#!5$e ] ɺ0μ~PAܧZTWbu)ա*np j⪨Fj1!>n 7:n~4x/{׎0#.~-vO w'n ܶ^ΡǍB%m: y{\E\p;_A]b\}Esttߗ{CU/ˤL٦v3Sv掣XUka/lE!Ӏ?…Œ)]d\UiIi;]+mZw|ɗgQGRGmsszlJ-qx]W7jTGbojp3owCm_Xu/#{+<;esӓלō7t]endstream endobj 631 0 obj << /Filter /FlateDecode /Length 2294 >> stream xYݏ۸/7}(P!8V K\!WVb[7;$EjIsðH g3 }d~a͂٬֋oV0UlH\5$$SBl}X {KYB \RXXCSXA10nv9.``8~Y &z?WUWs5z8]D+n먄&ZjV#CY &Yj +djzE lzc7&Y1#$[1Qh)ASxfe3 mwuwS6^u7e[]1Eu|n#Cyjt^g¶MVSn6lϿAoV3j]5*꺷'ǹl}jڝF:Kӝ@q ('‰$bV[EqY#x qCg]>*2t}0p O #@glcjs:nsdֿpְ'seӆBn2nh*JY 2akE!swDSW cW{G{ڂd`O5ILu)lQ \$+.*6Ktő*YLk%dO|eȲɛ^ ̟9UI/(JMs.]" lM@΍u=E;R01:u{)3vz9OVԑ1gʰVqt { "]}2X&k&%NN*y 0*T܆͐hdu:.ѝ0< nȅ.EwHߡe"* bDQ0mr沭ЄAY̮-'U8 Ҹ4;,6`~q^<$ϣ k%& 2gĞ4" s%gp`ʌP_KWs &LUf3?LX6hRm)TH'Ѯh?<0 d˂YOfFAg1/quӍNI+#{wR!RfǞl2Upf{i]U< ^~hkE&]ۧzr㛜1ۥCqS`bïP0& gJ҆ 25-mC1}]Ci5u NCImC9ylNJҸfuXެnnn $K υ;OK<:BH |x3^Q!>ֳFF4by3^Oi"kAX;l5s$9|/|7mojScHwG8v)>ڶUȖ.=94>>&%?uvܠT/t?8No-b||O;NӜT$QxÊA2^ꇖ!*BU,Ze悑JɈE,pWUŖ <]TYP`[*%;D$SH>iX`JD 2^?쥞鳁]I,n+(2kc#@Gj۪n '$^%ژ2QoJI=P~^nqendstream endobj 632 0 obj << /Filter /FlateDecode /Length 3022 >> stream xZY7< {Zز8Xb`Z?x,Gc5$WT1=V04:zߎc_ގ;̷of^pul5 k+A5sc-uRg+=NR /'2--gN +C)w,\%2^NN*d)%7X=Zuy7ێ,2ڳp"wIzn%m?g67*f-FDˉJTjZ ՚[$b~/T+'ϯ+d}*b&(FX՛)؀WivbŴ Fʹ(r@-:0[C\Hc(B G0d/Ewҕ8RX6.?xƠ9D"#H|$d"E y)>pu#uN=Z:WjpHݲmVoݲ9_gJ׼"SP8gn xl; =!lv1o:'&MD5a;͜jԽ">2~1P ↂp֕<Əu&YOfme#<^uΰN..P֏*~n\r.u)ټnGo k ݁9qW@8nBÔP>$\N6]x5yVq]A4/T&s41u*aN DV%O.2UH#MGSL>L99Wkc6<> >& {VRi;徝=֮pppXHxFu2 ]&ͦ~"n9OGTtÏ2jH;(6D֛μqx= d̜6myic}>Yr y;Aͱ4(JI3Dj|$2_7(@5>Z߇ )=d)ˋeέfr(4gxt%DQD#%"[jD'{Jzb)I57-i5>.BAJTy/I%]h?J ͅ%l-ͅT:zX6^AOyե5 |N-xI *< fYxLMRWàR zS$}B? 
:,廇Gڅ !7%ܮ_(׵NQ1{qOYaM4 `'YD)$༬\h /i6'ɢ>W]So ѐ clt]Ss8>MLatEgBQ 6eHpjv#(R=9'A3~p̠eS[{MئF4Źj.wnuq6s_2i^.hdJ>l6\b~\z5;dw ׽;鎧v#raUf^__/ùⶹ٤+g&Inl7ER_\؏o=YMNatG.螮-6SW^mnQ(y; #.̎lh'E/߇띠g3 vj0/0Xź'D8J+4?`gВKR-3Ǯ UPpƐPGƯ] Z?H "ȨfdDRPݱh\lJa8a%AQU|L`[T_cЅul̑TTFP oBp'S ޶1]6\ C͏p(m <}MYcD1r0hMq_Hod3endstream endobj 633 0 obj << /Filter /FlateDecode /Length 6201 >> stream x]Ko$Gr֙:awZ`w$A(hdiQ)َ̬j6YicWDF|aѵlQ<ѷw'G5 |պαő-,[eZ'{l-Eisھ=5 O?s:xl^wk;% \oOuo4JѢaޥ.%<]]9^O\Rts߾V2m}~'!|n>q\d!in57US[d |6eX Z%Lٸi'WNe–G1Y'j딝gGh)Gk;3Cx,c?Y[}vA|0o.Ι\-`0&\,y۰Jd-amDwπvn`rLwHW޶\.I6ou5]$jC]l׫]Y:ZXll6Tҍ 3+{G#4Mi#)JXo]-UhZdcx5c̵fOšV*X+H}Xf9=65bVt"7mG%n҃q|Θ3`fg:gaTZTd]u^ dՉDHU}Ӆk g2V199gY?r}o-N=S8؃N&+13(wUVhn`͡x"N?o-qjpnC%ʖv C{9# .b+hv%"d`WY2ˡ5[ if [ (ͮτmfkߏ MM폳._k& /}+W٧my +Z$!6 ei4˫Q&lߟ|6| Z|vt4V0w)#5Q ݼY:%]F.|c 1q`<ӯH {j4@ϡia`d=btl DKl6Jhuw)n58$1g#0>N}ش܌NW;¯Λ>Nl~Mei1^5 nA8H_lU5CxU!Ykgk%nx,8>lBJtsR3<{YV 䞉+:HZz8V47#=K(N&N^M> i2Op75~]7eAUroؒMm`:wږ*0k a5vJDaP+ 08!9#SlLCfz73CP3Z=<=it&-ɡ%#m1v8t`oc8M?:̴F)ff8nfRIRk(Nxtx΋͢8!ZEeJDp22i9!coV*d|džwߌdTwH[2 j^tT&3 䩃/=81Nt pu#yÕx>/(Y9.: 9-\JM1:VDr'lR!~u!,Wއ(^/W__`2: -5~u\gŸ[n3ѵ6 ,Oˋ  gT1<!9G6[FjcލCi9@jr-<>+C L ˫A}x>(a؋Y$<< WsNcd72YzZ  yH sShG4S˓^o2\[O}ۖr#"QB"|}rlM42ʿ _7,%AX Sû&a8 ζFv w4`#Cr!2agmΗ}<$&b{]"u'XZE0bAɩ"Tԧ:&iW )oBURf. T; BlB Ҽd}{k6(#hFN$28u0p+`S#d`iءnCV=iJe]8A<ݍm..@z'Xx~:)OQ|CLyLls){%~ot;+T)? 
bC2*lI"m3iK e*֤3XWu_5 h[5Wy:zPD l>1wY4Ear*)1ԈMC"-9Ex SfpBѳ(plilkd@C-c̿ҵ^`II粍U&sPug299s(.pnu8[}#犘科DO( ˓9Y6_>o&L0CTd-j߇Ϡ@-ȼ_Hc[ͅ9ip4LfyqpR!yd0ͰyA +1Iœ$&=e TA;6_$b!ƤFkKCin3r'YFTՃ\#.sp(2xJ7n_E &SVdH&9&&qBǭ!i}n NC(j5 =C +Cj~_BƉ#U0 S1~96yU@wzs On@!XhގjKg^"R+uUS)Y4:_sЮzdUBH6}a:扼r󗖩 {Ǫq/Y)̪Ԗ.T 04s⎍izňC\L áv`ZH:-[U)AV>.SpAX=.,|GJP}B/g9'1wvꗕ1J[,?.\_eXl@y6ه\E{ҦJc)lv,rdkrʬr,FE h~T]gE/ꏢu[ س L/G2?]ۯK q˅KdTE_׷ASF2LҢCz!a$ Jb#SP7?u13g,G>L՘A7@ )ok%Cf*{-=}7Ƅj8gƦq~_V\@i/(~d͔18ٕiN'%;-m~v,H-Мe]<xwmzqA:eE,f]S1ȗ1F<8"8"$6`UJ#&" 9(.݁-:PyX.KYUbaQ -+.Rdk( eB/knֱ^S&}: PM<E缑 OULlP KӐ6Wvfo7S!R /}2I~AЪTɓXr[\,K& ͟rGլrW7YɌq̣΁!g$_})HV]ܞZ|j8ds[Qeqs éRO*To!(4)8O\]0A<34 2 [i2Ꙑ_ZOe~ח7fʙzU ^G=ڧL&D9^u{|:-P`7n>e#2_ŠhPvJ(.$`ɠR>;TWʼ+1ߥ̜'Q2+"I9#~\!>E3 a81!*XPxn|7M/z%[sK̽c4)f:u5&~-﫣r)endstream endobj 634 0 obj << /Filter /FlateDecode /Length 5046 >> stream x\K9r}[G{ma-#b#ʫx`G8ZȦfKE%M8:ID>L$݌|_j}f7g8};]g8r3g/3|ƕ53m丹].?R{}zwo:ӽ8^2k[^,v\Nw}>-zLm ="{Ƌ?}pJ)j.:G^e_.aOs^޸}vdxv=#Nj@33\Z+l>qh&u])`ă@ny(Ϩ-Z7{jPD9:% ]/ * OsoVw9Yh{ݷ-%oIvHɜ.\6*^-m[K&0|nQS*E Ix\Zyb º |/p鰤J4|\JRs5t;`9ʁ^1ɉ")*vrһDoэ9rvgFjh͙mp@W6q)Frk xsg4 tӀMq"G"c؞ q\J}IΖ3wbWQ$;$a)WU-nTa@e7jw(8cb izw'%Je̓yY;9TpU=>=R([ݖT.C2kHd T-p`qCMQyϘ)~ͷG"]l߄2U3;퐹 ,5:/;LE*7擢?mr}7KufBtw|?4Wg8++ . 
a7@祣;*uB?6@3)8BȱALVU_EbsoN<1+1&'/ZȆ5SLdU O)vCfJ $ibMD@ phL(TEp^^Gp] źL?t֋NL\l~XlIYy6[j};xXˊ=*51f8c?I&hiCX_9qtxKPIqP8@3ܖ -C!i,=XM>Y~'/ RfMN<Fc><'/O*v3yxw2 #/^ ̢y;mP}׆x28&)SۇC;xS &=!(b!rJMnvC9g^pKiĦh-'ԐS֛ՉJa٣B][7Øb{rop+$fP<',֧HML沺07t^OQi_Nc1 : :[}GF&ù|Yw/MyUT4dZ j6ۄ+.nv/D㲼gZX@Q LGJ0AO!Ǵ6HՠfgeE6*f&S(ePk$ 5G۪Lz{]@g8ԭ @k]HpSQ2I%@%o>Nlx&fJ 7iCgPĐOSt0##)7R(Fh0q8@l~7 {d >$6㋳H=}8c?AP  @oVgOWcf8f~?oioYMHDpN=n?¶7 @CG@I ^̳'C X|H`G^aيTFϱ @ =z.>OJ N bE8Ooϱwi]ۆa?Qj.LuDSw('`V& &\OutfCGW=Wز _$nFfl{22 5m @T7 YGZ{N >A)kt&Xw5{cdƐflut lZPjtawXs_Ы6$MYK4k{,{FI*<%x(ʪ~ulΦu}8Ȕ#i ^xLq;KXrE ~P]M\&5 TۣTڃIowJmB΄N뎠 eܬ ϴ,/yMU=<6+wk՗ULv+ Uo!LY9)5߻-%1!|inUMM8`bÙ )Q L1h/]&wTKJgՎ*]Otkjsq3!ާL#2)^V޸h -- * 8+C`w} ]ȋ(&E.J8n"( M)>V3xa+۶gd=WigXz*<X bp%kevTCy5hn\KM"X x;)mG.4],ðKpeH֖">ŐcC*~МlTk8*/'xiЕW)!D6jjd"#U iN5[y-{DI6w^_.բn:ⓢXSi꫔g7T[ &%T99P+m!6Õ;;?bݖB3hO!ۋ'y-) &)nG,v20¡{ti,q @i^SY3$`c A€4׊*r'b @)\Feflbc}8=_G o> _ _UC,"GF߂Gg}Pꄲp^HWs<ǃ46Qĭ* EwM4O)cN\mWT{9&A\^tAz w 'M "Xe=c;R7Tn>MmL*1u XcN&BE*G ڄ6rg.4uƚI'e\p}#R4d<#=C7W] *#Lh9}qxgUo0`K~J,K;9g,A*4!Ҹ >i`~ } .v ((ý0|ET/ͦ;v먆ӂ*(!^cU6[NEa _2 TpXS' I7<.=vJu}pЍTi^pY>껉 Y c'mo٬^; }YS0^֜x@ O1~sqOjk訷2 1O뷫wC_wg `endstream endobj 635 0 obj << /Filter /FlateDecode /Length 3415 >> stream xZY޼_ȋ/K#.Î. I6`[h4xT&[lI^Hlnd]_U?L#6>ሇrїqfQ'OK=9Y'oZɊ0,wSd= C= |yI3Rdf<@L'"L:&3N̔o3/KέO&jn&# t 鸥Z*zőUwe1TR?f\d0M^,ipFʸOU-(*. ۶Ys3#f]o&bNLfjKTERrGIQmkJ0mpIKV[WsC~xҵfoJOJw8X0zK7`@Ѵzv4pYSJ&yt$63;sܷQS|P!Nn 309?0Q}kAMm1lA Pi*GrH)xqRc@@/B1` Ć*]'lV ZiE2z*N67rv 3.Q0Wvacyߵ=]<5*%G5}ҽ b`VfT3 %on' e|{)_5M i9Ք3oWoH>S~@ I.uM0fk|ȃ !=[,׵]AgCǺur^/wI )^[f@,!<-ؠKT. 
ܯWg|Yq%SoپF]㛈9ے&9%8 7@,&>_ʢmOVߑl6IK i-B7GJmV#s2.V%^2k/NQ=v]U4MK"ouvP:9INn*9[M0 jʱp]ex%0TKP?u,yT9U"?/ (k'm+`)c|N!٧S̭YۣSP 󪳥:Ix [KD4~T폺< C@ C.vSiJgدdI@Jhk&˂jLt1Ċ[Ֆ]U@_yȲZ.y,nq^oZA3>q*#mݱo"_Jr*t ۺ?4+Odqq\u㒁 [.o6M`ԶT%}GZŦ4!Yzarz= l# (}Q7 w`o:e~1qn'iMh?۪Usx kW;.,^Az`"gQ `PjAKP3QnaXDNEIi k4 )K%Ac݈T W!鮍F)}t:v$D{հB9y;y0fN{{'ųB/*\tЇS@L- _f`+SUopֿYTFq_/ff {rBt֛R[qyÅw78w;;}w fA%#Z)j<>=cמ/xa Y AD8y$s ;΂EH<2Y)؟dY顎cTs3:A[Y__&" l`S ޸hfiJ"z~7U[7hyg_MCJhj=&X^COQYpAAԣ^oE\/\7S.i<=2u)FK.$2q_; 4 &vuxx;#kP\磌q Sw-cBV-wpPaYJ{LY#ux˶ {]RA94?۹;uTg>WT| WV\d"#f``]sUI`k D'z'hZ-nvW)BVŦCUHai~QPq j|srW<5T@'+sQi:ٌsU!V=Q_.7CڻOH}&xuyx*ıԌe,Ո@Oj\52bkBˇM^+ PǙ0^%yۯ#a^cKJ(ޔ J!q2KzLh6rVBX=dOcƑW1 N z둥׎ۥV4AdS,Cׄ?$TFJ$.% 6WY]1rI:9/ڤendstream endobj 636 0 obj << /Filter /FlateDecode /Length 2218 >> stream xYn#W_ERȠ)q&PdxG:Gr8CJ tbLXu30a\mɟ_sDs_O^q%fuM|3yKt& f(Ӳ̑LxAkr:N2m"(+#Mz]V9͵Ce̐4M")=^Y7bۨy-8z"r5L *c};l]o%3?:U·}ɧLhI]]W3U˾Ef"J%^ YL8W3N42l#H}YAl̰?SYsaa? Z%]4mw>2vyZLPB0]Q]G*w*Ia$Xfkp &Qi.R;I톪䢹 hKiܶ|AnR8Nv\? ӪmwMV"бp ' Ќ"E8 8Һo1\֩(wyA,]+ѱZC.'Bc&(uz@6/0đ-ڕz,>.EG;]v r{ؖ 4.dWdG n /S5ҹĒmڀݲ/['1ŝ~ 2 [J ⍗ĵ? HHLAHT8VZL'%c-K(A DvUi X l#Vg1@8Р'f~Mý):e0f~ 05oBQÔk:Fr$e&IFW4~9؂+紗d@p!owYR2P(4 !4՟%:6#8_W!'@ONa< %ynns GVvMhl@i9 ӋxpaEI$?H2S쏉f"9I92ԏCZL%mh&DAJ-jSv.G첆i fS&-X (=\~7?#C+R> stream xZێ}`?! dİ:qxc+@OpFJק(_raV:ugvrypgYm 'l=cR̗?`k%p{zUֱz S8dWLrneQjiyX솻.Bt# XM.Tpc}/)PzPJei*쯨xe*֭ӻI)ួqBf(lMm27E1[opyf!aG-.*cAaB4]u~p]_ׇmaK0 yܮ?O<.Х+X?\-`ۆKڴ0C8, C͙ %c7UUwg߼Hc5{Ћ+Mk, ץoO[SgImcG1t! $tte+6)GvJp767 -'h'֤ )1"A0!ԍ]*8A2€6^cP%@54QN:5uq!ÛU ! ő„ec:J6DnSMYء {-%= A66xT;p` Ѵ%C&1.LRsa͜m !>S=r8L'PRo|GU"gD)Rȡ9ʗ%Qb^ hh/C=_Wp:])vY/m <[j2n=c;dfӁj gj3BU4+W :ƄfY'+b>"-eX//He>I!' 
\ ӓ:oUl݌sҘrK>`}8Π\x q+- ('5RBDt Ѡ9, t፜.BdVo@g NU!]y)J{2DN/#6Sg!4VPV4eDžUk_e| n^Qs)֘iī_DΜp E!%yŔD q>o7pPp#RS^F~S!όJ 1y7dN 1J~{7mH.س.Gѻ|wn:> Q؟7 p$" p@ ^DXXÃ(6䘷-a+>cG5@^Mz& h\N{QeV!1VҴHt|Ir47\pk>-+8B!q C^f>IaC5Qp̣}mߔ@n$ZcÒY1YM7X'4>&#C鮗q氡jѰE h=D"HթRBpHVʪL0vMsS7,eK̘Q>q:hj!tbFAMQ gfLGPMנy I*9j:>CEHM1Q&Q9)Tacwٶx hqއz:l^!w]`XӺu }L!q,qʼlV1uռ| EemuF ^*ވqB<_ $C~e@32*8 "nK6+_qZڨ;;0U*m03Ŧ=Dx*/0ZN2V]gݨ0@n,1W#P$PtIK}$%̈Z^im%F%Eee9_|O p8SX~w3X=w\5{ ,F:` XA\whmN!i5X$P9ˋar?)h3}nd/ 1mpw wmgNNQUPmi~Na,9!ItO]Gto_LsePVuzum7A,&EOf8:vH4 `u'cpX3Refs>8Crj?fex^<*`6(CHQ+P7ˡ"6#pB`,<*弯 А7nIz= zg ծqhSZILb{Tszx(ZJJ *PZ fA [#@D`j{ nlv41 9Ϙ=&*>_:6[.Il V|(֍yQdȳR?\ ᣀ?MF+*i9",sXD͔B3%R_;qmq/T <+ŵSg!_ܥ#@cRtD[*eE#Z >Gendstream endobj 638 0 obj << /Filter /FlateDecode /Length 2929 >> stream xYIs>C3"/8U$c=T%5(Ip@PzAh6tx[ d_=ógĭ¿ϯTlly}̔PabܞA_c0s\` j D®FM5O/-вHCxD(wɋdϊ~smY2Mv_ͭuX? uυlt,.J7ߨM77߭±JΔ)j_VgW D lDtoЫr+"_z~SwݪzCW۶i\nRRقFzi|)V#mYjR t8v"n%|c5\E k/*1wl Hl7A"9hEAK,ԣߓ=[,8rsJ蘁挍j&4l>iOOpr 4l^( 5swLDMwarJ=XE O7:`F6> ( RQ&6iY.XH)wAnΜ)E6B Hc{a([ڗm٬RΛ@IID՛-5 G DA&~T fisI e$2ܗ$t[r.cp<ë, 9)կ6nSQ AyGy-uPp7i'\C.4Ixns14VD+he<"c~eV ~M ;6{-!ʮnX'ׇ[2Te[>KXꮾs,(wyXh!͍4@!: ؠM+# ^"CpŴBeQh^@H*%U7xo Oc[p&lGBhoV!mf Z?pÉ27!}'ڢ:V 4|a !\ks ;n1C`/TK:!16ף$dS'c䠆m֐a=%C_?ˇÓL'x`M5 fϟ ;:$`Vv>OX4'Xz[B,M\8LGO&)\;j'qW§.cg)RV|z-<2ֆ7Z#ia,Kg 7ېN9`պޭ.12="`. n:V@5>0=F )Ëut3ՇDťTZ}bH1C}X: G]xHL tl*AQ} #0XVcSΎ*ǦK.,캎5"jsTẠ*!7WE}Ы MXƥ.!i"L4*uB;m\MM)C1coG5f\>UKɌtj7t);P= +م&q@ n6](5Gc\p_[OCm5Q.jۜHJc$9j۷Sj!vyɝ*z, f|3kWW}L[OVݏ.!8m[߱&<էalp,=噯ēA9\B{#B/}w' !2 2c(22oP:Y:3DH""T@r{uUp]ke@sޙ*shUjܼoWz@-9VJtzh4?@)1 *o8_ۺ/s >)}nˠWlQ+n|p}q&'k2tvMW =F ҽ= yL0wN O f3(&5, `SoBor< M]8Veo@  ||xݠӸ/5B3G%_l-Xα& \Ê"[y ԄXݱ[H&<Z1ǽ˯Hv0, G]$LyJ۪c;xBi,%M?zq'uFB+XFuɏT64Ϯ6HxD{Bh,`=Z|>Li݆R򱛍`-!Cu b_C۠%oܳx[=ލ|RQn\:׉ WOQi n{}\&der># .`)[b1 au<7O&s %2h8dL}|5LK.rA?;ތ|,cq*:HiJ,F۱^tN&]LQA%ԳuZNwlŎBU[ ZĻPx8RN1ȳ8Â^g%j[]uc Si%N0uC'BɍSD8z.+7̂ttv VJzjZ}V]y[މTJ}\CrRq_[)J'].T}:̵d۷X 3|:~`?Z(E2~՞}S:&+׸QB? vgY~O= rRm.|v*%O. 
O|/t9M:q:fGvh77t j1Sjғ֠hx)u馒Q:^[~91^]endstream endobj 639 0 obj << /Filter /FlateDecode /Length 2462 >> stream xYKsvU.)8ʃX+)*Kp.kAhrII&rxv+1،Hħfl;<b<;3bWeƳ͈)=BZĉrm7S>H[|dL'F9o?\Ui\gKoW)^Y쯓֦ܲev>,"[ߔRΞ c@+.>v j Yމƙy2|i1hN9]CD[,1'4 WID;жq|EkC`,l^6x [z%Ռ e9/r߼)J.ъ:")@ŮBf WddRSyK}EԶ"2 tYdn{V/Wq64繍X_ؼ5hXo_Wm-5PU/:yc Wu٭[ܬo1z QM'ă!S%qМBWi!-_ FϖCW1gp%wgm~Bq&Ue"i|\[\4ћ!C鬍 )>g;Ep9'<¶_&E ill'KYjZ(-#?*X1~ܾzQ e\w{|Kʳ]~DzRYvN h68Tl6y -;˂lODLvZ'2v"8fMJf6ܙ!eR^;n?~g";#v,& AYFqTON[^f4swh5ф&vH ?^K Sm²<׌v&<.&jq |ܺI_w5UюC&zZ\ypipRub+“l^>>IP)?j!މ!O^yw^<̆5 M%>$f_:%M<5s'oĨ%K${_Df\_t"r%ՍK2tp{E#.kv:cThn=-=(yLWkiTy3^(Z r+ɾ ՍvTqw?PSQjb4ׁr5Fyeu8֛q>G*ˎ/va6 ^;O {ZX$i+_ zX23kbQP .Vr0">1ZW_Βw2~Ij~ endstream endobj 640 0 obj << /Filter /FlateDecode /Length 1769 >> stream xXr6d2X3tƋceAKTH@2I^Oc1wӈ8, @ 6d<gȘp lJ01Gy62F3c(u3,"ؠdJF-Q1ZaXht{ƠrMu汴@,Fs c~7;2L۳YG}-$c(_bbyF_F0hw .p*t^">~ *V8S&ƪ}!&&*&4圌LdZJɔ`-`lʨVj2H*ff$g I'=G_m۪4ySN2 nh{bUp9#:/7`|nlYi|Tθ\q5TafIq%nyvhK=JjTsM (r! VТiJ!f1 F*$ذ@FgиPEZi&~k*YY8 2j =LnZ~t/g;[rƄv c,VMZM&Q,e lEsi&$65'}?y(5 H]hߊ!P&@7I'hxhߺfw6qr0<%6[DW},hu|i2Ff(#)VTS1 MDIW*/.OG^+0!(O\B@uwpJ6a$$D˕`օ4)gm:,wYR(^ deWi[jF%J0jUx4QS[ANt>Lp>&pZIkP!n+P2GMgC+#-&Ϯ|] "J .T> IB̡$37(% M[sz]Vl}ǘNɭUBfh∙=MEڄZPŘSX FWb-ǻwfU\,~nq uγ>%aK{豿LؑC[0lk yYAP84E䠗L3(^vB-w͠si,8UVfo[mS0  ݯJKkDWLr>9 Sbh)&ӹ .2zaUNliG?P [1hXhme5u>sf']Ɗh.29 $4BѴ\bZ%uaJŏ=o9&s8 Y ]V=&g6i:$NS IЩvqvVmi \Gdz ' Frxp!5`1Dx\dfɄۿ!10nTi3 F4a|Q' ZB *O}}ztY|;N@6=ٿe$9J"w?q!\L3uL@ ZRpdSA,<>iu ^Σ4sG7]'ileB@> stream xV[o6~ׯKRd!{ VADKl~XC !8#1v#E_"K[2 ]4`HI qIl c.) m !ʠ8zN et 6ޏHA4';0|($)9}HRED[QhQSo1Lɀ0dP)  %^ Lt;N uR1LdZʁ_TPn2ktQ\.6-Ư:I2]~?ؠͤJ v2QmD014觡u&gvtXe~ҕe+WV|uc/SW ۢ+~M0jS({zW]/@/in{S"STGz\|LRM6(j_ڪ>;ak F/n~hxc{6wMt(3Ns4ڋ ΂:V orcZeDӑzHA<"O)uK> QZ8bqFqL"GDK >X`6<>HWjVXöhWMS7PKb?B1UN E鸞<1.9ԇآjNU-T,P :6C@蝭j[ly]>FO䳿F6`lۧ:t7C@/ǂRbWWjNc"[fQߪ/)@Թ* f"k_4i d@dgkAۆ(>5-kLc6Iw㙯SžKjp%vP\h')VhTƅ hhӓ,fe@cRW*ϛsu7%7RYV4z7?} kԎ~o*endstream endobj 642 0 obj << /Filter /FlateDecode /Length 176 >> stream x]A E  Mɢ1(,zI<Æ:K=/H:4VE t! 
5Nԋ 7ޟ4|W gwCX@zeo$=mvqgF!2mg,.FTo1zGY9ߩY Y9endstream endobj 643 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 547 >> stream xcd`ab`dd M3 JM/I, f!CǮ<<,{ }G1<=9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C530001021QLR e2fówsO)SyxPendstream endobj 644 0 obj << /Filter /FlateDecode /Length 161 >> stream x]O10 R%BVUAp$@Uu8K结ϲ/==>)ul"~H0X ô2qAVٍDq:WeUo!树DSUmcm+͟PV(9K7SiZL!`,KS$endstream endobj 645 0 obj << /Filter /FlateDecode /Length 2624 >> stream xY_ooW՝.Iל䢗J Fe:dSO/Й].ŕi'F3zHc6:\ ̾6fdߘT*NiʆÆLXt$NN)y?GN܏[|W̉9BR^\g֜\e zXɔ'&?{Rn7n%oԐZW b[..EVƝv/8oKR_仁>;9~ًckУp矮έ|oO]]eMQI!u?;mޝ81=zV) -XO'u6/Vfu9;?ͳ:u<˳ zEIߍCۅ穫9O}@S55v +n{kn*Q6į7R*8g;l*IdH0!Gb,ywZD͂]Rq ^Yۏ'7G԰z4=ܓ&Æ4|[;fū!F. F~ҍ$ L jzQtnUUwIgg:P:C!FqN@1wT?^'pWt y2\}e"kLh:624 "U^'MtX;ד,X>G2 H3)Rr[&7`i d VsճZ%Byxf֗Q[1A- )I &*u?v,_nT#2kє~~&bDWT'nN/d!D6;u'|hAVN7FdD'j{ 8mzBqj$I BdWW:sG.HDr-K$ܩq8@dc55o3\ugVź*~=H3Àp\7&\=KSUc MY <]f^" Mm\&C9'IR#^ R Ѣc1S 9Ki`z aXϛMjos7Pe4">V`ksiFUJh7MPU"4p4O {8@V :,)lTH Jcp]#/)::ąJ$s``wH wPp2U#&CԓhP] Zəbg[V-@Pn@;rXN7:c:7}J͠cDv/bg~# 1?ZwEsEF>(aqdE,4U!U lMc(*'}4îQNe`AE-]mDRЎKwu%Ifnԕm5;dF;0%y~cp!8jqٷU&\Rx 4^hPn!d$h'Z\ ̡R3)B* 'վy0YBte4x 0  נ]ߪpf?.oQDߑjcB8lsQjRM'!| =֠ !LO8P Q"5Zzf/-'5M2\yS{V$%#[A \|d?UC䲽b~)xk/kΕ_s*ugD4Rk(7Mw|M]s{׵ϳ}@v ל*ͳ+{~w›bw˽,H)7l|3a"endstream endobj 646 0 obj << /Filter /FlateDecode /Length 2890 >> stream xZKsΙ\gCRbUI8^RbRK.dCX= H*sI@,_nAk/ޞٻ3.3VcYx-䵢naP S}Q/+!x'?,iM0:rXV&eʒ/#-wߟG]VUkȚRMwQSV^XZT)immTly\-^iԔ/tw9 .߼fQq[[XymJRî KjrTd=(V+P&ٽ`(XBemiwdQJWo}:]D?Y%ʐY]w_~]/n"I[\x~ٿj>}.I )%zLe?KP5FM'(\L 洎P3p;Qy(FT`*K6䒪 ԿY Rd}j9'-@l%h:\=*Ymb߰j(Yms)VWoKwX(Ix;WVֈQo'H_g A|"I Nl c\n>7٪`:48Ic b_ȀR>Y% 9!?Dž7FbCC*~D`|45*p)4$gRHqWz7%TFcH1 2}>aЏ {5(nWtMMmqWE6~v[lWSr?l7'Sz)0RDQqE02wWʏe>* r %HUTP(@{ :FOP "+[mrӏ]AQrRhzA0 b8d3F7ϕwLjO*˵u 1ZE^n\mM917f4m ]VÊ6I0P <xahv}A%84q@ҜЀv"׷( B&Ժa+lE!x?RɹgOGԸzNO 5)mTL4M_ArWÍIva;Ix`.9Oh(7^t%|@6dx1Fʹ;U `h@,N zc&8fp㱳 "V^ɾܓIMn\¤D0nqUe[+fM:> jh̍<ٌI-P|:ʼҜ"LԜZuI"XS\556`;u9ΏRҋ|\&m۩:3b^m`p=ݸ]̴P1nC@[Jӕq5j~ +1”sϿ U vF ?BB f?f$vMp8 [Ar}Q1s87Üs3&\!*S./ߞ|{Fͦ`HCcjq 64LG Q 
D!`2IS+[B_$\]CVwnH![VL {hXya諳%+B!rBFjꏢmݏAakkS?Gelq~ޜ{C1&r'xgj@ԺO>'ND8f:$efg͍X&PyU֨bטxLV{@1= 4ұ*3P >=Vyfl#s5}T0 <2*F,4D xwAy yn_{&XC4[l.Iƍ1QkHaI$l O穡& xB*?4Kwt1|,Vt!a-uI'zP cʃD\TZdEA23$_(] ?zMP 㯁șH?&ŏPa0FN`r 1H ?ÓtL)}<OخgMIt\Ħ<ӎ>dx4|moB'b:anό7\`3kC4$oWgYMendstream endobj 647 0 obj << /Filter /FlateDecode /Length 4083 >> stream x\Iyr)x p]>ȖJ*29$SiB2搒^4#JR:~ט&/gdr}uw'7g_D[%Nί;tbDK]Z.'˳BKl-xbٳbYm^Me-)Sⅻǩ-SR5%UqdwKMRr:G&Lt&, QEk% 5-i\eDSFa+hhp - v7Ȏi w;ˏJfAqwqȀS~ouRVra<#~Hɦ=q`O'sSv"JI$1)ᝆ0)[LoBW=S}_ 8} m^LukJ >MZF hbE7yu X[xBEVg3$ESa{lOdGOI"7hu>Ob^%LIt&{"5P2"4ŗN`@ε-Z*EХv>Sav`zS!SB*6kfC:2ܕ}_zwFl"eq0\m#dJ@979Z9 Vq7Mf/w_O%m=s{KiKC"tvu4i dA%&fagv,_bzDӞyU| T5Fvhb۪[jOkw_]#4tjQ=+)Ei\zӆz:v]wlg4LzH]D.L+B} ݂g>No.Np!!g`pv.8+n~e@)h翖Np lY4YqvS?u80yy%,A~QoUEaȕPwS oFtxz +՟rD$ʄQTRMA]DA8Ux q ttbcѨ>cP}&k XlX5M5_T8P ݃U( )!>*8Ƣ`B|g{ɶ恎n>lO~I?Uh!齜&FYOuA2 J*sgdI+ݺg7ᒨ>oO>fJ  ?}T.@PSj4P^*^+3b!ũSjơɶixFftIP2eKaǚ~?:%iEDЙ}c7U>p6iߗc|q 7LNq-Cp 'LQnQt̖ JS^+6y@n O(F *HJs?ZG'l@IF&9P:..Ί'a%l$Js"vʶGqʱ(BmZOpL㬵7N0W .Vku2HgGl:TF Tade5%~R_iOu$kmXI7Ds8d4y{yB|UÙJ:A=ˁOM?@_M:-ΫU |՜疁3wf?~ku@rM|gcy7S;&%Ӟa$ ?o־t?. daK9,)Ԏz1p X=a&Fl;b7:(//X E*=p_z"AbX<ˋ"j7͏ J&NeݨgeCDendstream endobj 648 0 obj << /Filter /FlateDecode /Length 4081 >> stream xr5gk*d0½/N0^/JR8UZh" -#;` ;`/{bfis,>9}-WUy^^p%*|arzY'R{Q~7frDBgʳ{ɴ+gF9}9?,˓{]*LUm'U#,2}:F(/;/e=EtuVOo\y̾l7ӺnO lJy$lTj>>q4Q+g >~U\+LyNdTYuڎ;v5"en/*{&;&g|ײ?9pg@#i C'yo۱6 l|\o[dk"HLߍ! Dϑ}+,4ˈ7RB:Xa<qp-82ULL%EglݕR@NCeǩJ(mX Hh>"+f? f=k'^)Y9 d7֮^y:jv[98͕S`>O1dYf8 AJ[(A- ;h IOFߐy1 ׻,#a .#o,Fv={UiȢ{?u7)4g?+Z#yw-Ird%7r vJHrp  *G: P 3\̪H^>p˜8Hs-@:f p;ZkU,\#}>Ƥ`, DB$45naWyNty? F =-MCCe>te Gv7}-ۦWUz钓l:CQw$` 9͖ŴIyC8\Ϲ2smNUFye.˖k0Y cǜ9^p< +OB6DP&򐦋;(.<.2 !d}BVF[>`l[޲ƀbsڊ{pc!. 
rI0OsJVaLSo_gR& "ϐʵ< yHQspm#l?[eZkWT붷z]9CBtQVj|RNh `u= XbBG/ÍW ƘUp0VqyL䩔=T#tѢ@?4ʿ8lWHƾͧXydY߄bUSK5si&T8D^S˳YssL CK1VX0Ռky@} l=rcyD*0?TEw[<8uhT&&#hp:=*8yø NnuOP{6>;:{&Q/t 5-Ձ&:["&!ru6[swvfqTـ *!8wA:)e膃M Մ?=8 njrנYAxTֲջ3W|h8qDqd& |i@O JAˑ`!LO2N$ G`@R|Ŏ:XB84}Y/fjQRye@% f@+0!Hdj~MAPIY2ӛ)^ETxKA moD oi宝/[@r*ԟ;!<.N1;Iv"Տ:mmLnuv`躉}po seu3l`X0Ӵ^؏;*;g`o}gIJXy)8 t>3N&zZA3Hk\ SW>n[#t[FDcFIO04E*)Ŵ|;sׇ4RuY#tY72 UզC&f`^i˨c?֮!Č4bᴁNnNBJEy F6#nC&btMХjJ2&i3/H*=iz9 t X!OYKXo.įE$)n ޑ@䳦Mc~@δ]Rف VӘŹ平8wT Y&x훁UɑԖؑi쉘wpMQ쾙}ӗT}6I8'#*Ai<14꾥/8e,DyV2im~9u_-֬?uv8|zv7]'vYEC+]lTƔ]Lr=욶'X*"|Sp^l1uq{äyiݡχ]aFAуyHb郣^T=UiWoγȁ\hhhr]3vwmπ~y`C4;}P@ 2 5Ly[gGyendstream endobj 649 0 obj << /Filter /FlateDecode /Length 2047 >> stream xX[o~{ES+ 4EiдR|aVmsf- r8s߹Ìb6'|v{p"hu6'=R^J7' -së56-6'}͕yUIve}6,u6JqSѥ*\_=JyLFnʹH_?I*uIN_e(.ZlN=3Ș*](S4.>Hߢ l ;X0cOI ok-fse(p;S67R5/WW$'ՅpfͅHmmm_<*78 4A, 6L2vd{T"`)ޕdݰ]\ :y da7MiKv&hG [-,-6/X)/4~gέ\͢ p9 }75Pxd]P*Zu(B:m ck)Њ 2 `ip 0\lA|D@* < dPnݮٳ%k:OFܬ BI#۝äE.D5emKR -!e& L;J`֗#͊C4*& qhɧ`o=9RL`%RkXK6ND|K vK"GvhO4DOSЀJ[lo7M%aX:d, YҺe?jHC>g.3Ba3 Ҽ1JJs,P t*j0B1Bzu<#[ H1%y^K+&ܯQ*hPU`? {"1">NJ% ll{:{zq"?1 P-W|]HɄm%־_$ Vpi})Vy6Nynt!.˩x03֞@tɉn nx`t_h5o-WuxW8,Chezǽus4FcT7+0gdӽAž/Tt_d5g]vASx9m]SAI^^{Y!dɔV.M=&֣ YL8U|a {ʿp3J+($~uGg*&-^"5`ZQB\;vA96އ0e'~7z7AMSc {Cop仨7K>I}:pg|薠j]]-8txel #=,Ѹv3נ|bZ;hQ$Ƕ=iQrg.G\"..nG\r?U.Uq 2`O$43!<ImM})H90wuiG5ۯ)Ѻ0,Â]Bn0[ #e'xO\44T|Ut6eWa__BJӬTRkq'U Jϳ䣺ðW;˔:_S5m nw/&nr"<:;fKuHxlu^P֟NW)#IZ@w`0̺NM^/FU8jrƛn q OkZф|BS{}[╁S{D2i95oܼb斡`i ge~h3#pD&(/fi"]Cy)~6 r[Qg8AtC QH|vyn4$ۤZ5[Ϸ,(DoplM%vh!~O!NgȪa 0Ǭw>4OfB@|o=:$V&/uqC.;pjnb8> stream xXo6Zcǰ hðb[a~PhYV&H")i;I;wwgK23#r+|* 6dΆ3d\ U&f.ݜea% j) jч7F +pST8{)J&O$9 m,I)b(1h+KhF&0c 1e S.v7dzEJl2ѫpqi! nJN4pUBYbA0,²ZRr!|AX))sӈL+{XCJ9_2[~-$zȅV}:Ym@=w9P9*[:fݏ(@q3} Dz嵧(FvsXIpH]ZZyCP'CH }H;p;v|a]F}l,|r)D0)qw@J#&hĬO M"Cp) ѽdq\Ab&=3 5JyK%D(+0¥^ G!`õ!" BhS" KB)OwQFb$B0,TP,L ? 
ev cuD QiU{ +Clnce;C(*QcQ 6 tM mA5\+7*)ڭ޹FMib~S_ -%%D õ]pLdƶ4ShK @Qw*\ '#j<r6b.Z+Ŕ4L 5& 3TeIK8}_YvQ >D&ŪݷM:bFu4O>=Be l:5 ڇD44ɮΊB>ip~\B$9ZM]DP8+jA&ٶiT"HEX&`FX?TgȾH|;z0q"T5" P/x.o`bfn  5L:0G4bmpPana \n}C M^Jrjud{sxrsdvG z-90=MR)g( (>ѴۡktAttH1դyZB ' _8ȵ6_Xߙ2f,?(` Pyg.WU1!d#?sr}Wx=iAH8U ɻW;fӇ6FwT~s_>m;pa=]nk8o*bLmx;qO1o˨K4*Dmkǡ+A:+ߪ߅E`7sܔ D&HLfIBJy(H׉妟3'%뤱+'*i|p\1+ͺ,',H̻%TΩ$PpY}zhCP)Ly ty>seYuw#PMnL]h 2<t(1}`W @t#L:~EqJU{crRVq>n"H/th/"WWWqUy*|}}mǓJ -7Q5yk{L&}t mAu" |:YM- '=SGD T&#$S21N(4=/ H$JSUDT>yZ"G (/ eb0,̩| IIքGN5 [*.5\3%o\<+KЀI#m?\d)w(Y%ԩ4[]M# R{L14+˿!Qdi'u'uo e\V.M}ڇ/<,?؃endstream endobj 651 0 obj << /Filter /FlateDecode /Length 3709 >> stream x[K%>Tdh-ǃ7 El%cdiN0"A䬇\I?0镥SJ᠁F?n@NM*G#FNŸzL*tc#O&LRUnb)Pir:մ*+% \q=qhuq>NT #-wX/9sYUl,J7ɇK"eŲ˧eW:OGŋqXutE2)Ż=6mJ߄'?+ \WJb?YMOͤdP>-XU.z,m(vM|Ef3tRv܂lwqgȦ, V+gMb]ڨrOslĠ% X R.3KʪҺniͯjqݑ֥ѧEv'u6!nqji 0L$i[MSUQjkMѬmכzu} eKw9J ~/:).I|=e?oߟa>8Ə޵f-sT@T$ b44D n''?|uZ<άԸӋٴKřmoMf|w۳nsq2[_0W/v>3*-J,/#;l^PǐPs۽%}A^t䓢aqL eaa}2|Jri)ȤK~Z֖t0pLedꏞS2b!rf%0Q6c5ҡֿvlM&*-rfocP+C6yh* |L[OBMS~YnmvXG3bDdTN)v9[RVDˍnY~UK04Y 7A$1aFCaÀˋU&X!Wʳ2SyJ3ҶiՈt[TPۺ2P, {b۵/"0L9 Au~uH{BVS]!TʣB$B|hPp ES'ԆvcǶ$-@M0&zS ?f@8!Pb݌P/]ކ zlbiES3B6q"p!t`)h(.{Z $?;]\.7Q8:NXIסgP{mǢ %,LQ5wg:@U@D'[5RbYXT2 XLeJu扪x1qK3 +TT+ 'CO)T*(`qScWa_s佥p{CM_` ,S`GMHRۥO~,x:Q 3 n|g 22bz\P6h ^TF;ZH]V_>uIC"#70-u $A7R0? IpQYǬԁX]FOGϙ]q!3k2Kͯ  @؉!SR (T !Dp $eCIcFuEl #SnU(BE!%EGch<{uAY<5k,~qG">l 5}=IWO%wOIVNc 14q=/cBx뫧{˪;1Wd† MT.S871A|cH bJhb(/ ?T 4罂z!4C9P3XZ1:1H O*68\XU7M e{ImsVn ո(ʑِzi+2x+we0GIo+q ) ޅ;"a_ ~(MZ,ho{jKUϷϮTϊ-NݦkNtxͩ+ 7MGmEϦ{^^~K&bEre$# LH$r1{C~hmLr:4Ä,~݋㞍wNWWͦl`[zsςn z~/ q?=LH{x~/{4Cce|=xyz~{֗q4D+vA~ޅyWIwsڰаcart9/~ QQ@UE|>xtB3Tbt`9|OyIH7wO{ܺ£DL_gkK=Z6~G͛͂\^}܂3 %ǽo.n_}/NzfN8gylMs`<<6FK-I0;.2( őѬąN~R4%]ŻxzV u0@ (c5Rok5\GQ3Fwxԅ237C u&c{1[dZ*OGWendstream endobj 652 0 obj << /Filter /FlateDecode /Length 3067 >> stream xZo/7?vBír$^K{/E| l$ӡ('n/:3KRRAPHHjvvCLL8>yuDӉo<=? 
sx'nb<+oN"V 5\k+amTª"/Y_NJd zDɮ}H Y<n`-Osy@4GE״{107l!S7 ̌g'/؅u`a%YiĕZeRf*%alvU[Wk0y\hqΕlQby 9]];xm,nD3Udt ^d>)AV7̖enuKEĸ)]`_'+ZS[ .4XW>](:␐y*Sjp8xPQ|ZhL3\! uİZ2 x[G [b39P^3C1y9%ѬwDo=G!Y6^ӵeZ|0r^l)Pm+z:bמ#M``4E"V쬁wX4;pP-ۮO; ?c(4b:2ggϟaR^ſUBpYˁLM;|x~f~tdZAѮCCh0$i&n30ɸ*^8h,$gX'KKm-PP+To/TkF< Kkt^ܚ]oQ0Pi_uc5{!GePεETBuP)ښ2K 0 0`L,X7G* o,X$ݜ'5A0A1~f\ 5b]xAtFS H=yIfӎrYx́@ķlNVy*B;߄vFr3&૶n_apQ` 68,j l;Zydӂ`SN}K[ *ȫxY=BԻ ډSRܹ_,nps.(T%[KD;Aӛ'#4@gqAxؼMã, zB9o 8ANMyí]MB=M##!c3y^w/] 86$,SgGխZA>-$zZ'AERJ<6'+I,ŀHP> I!Fx{Cy[&Q 5iqMWdU՞4͈·9 Cxo;pEIiޤ{S\hYAhJ6_>\_ |sjx@dXT7y_]zuGHs?-Mhpdžqt9.ӽԗoY !ż'#$,9# Ra5褜xZHЁyolp|zݺ2F;%FݓIG]OzN=Mkt`7CkIңmhWҢx(ryw@:U@i8=,:s G>SHkX(hj-5C7k(?l膃A-(`~4a$i 5tUCˆڐ;3t ?ң&\d\!eDz MV NB/INaR$M0fq9Go@F6g]Ҡ`b;zjUmzY~*֔]QL3]'f#S%kwi ౸ ydF1`>clsє?I@ ;@ (ˉ6yfChUf '%EVXczq fsDfU.vREB ;hWN! ]T6;HT~ "}#> u+%2alKDh,aMk+b=] #G@&%N[-dPCwyɽe1_XC9L+"W3$!L@z D̾9.JHCl~3>4ɴ1cCbpe(5)(DF.XIOq|b\2jx_b2K0%Ǭi.BmR,y e7-p) +3-!b̅O .W{>DQ/.OڍQyY!Cnԫr"d_@L XB*[|MŗRa1eͥ@02$%[zQqx}6$! 9S.Ew! ` N ~{j+z!t؟f×8F3\.ҰO- irsVumk#Bin'Oj휾i'=>g p>KP X{ I|?\A^O}~~ 2"l#lƉi="ӟnzߗE*O~O6G{}w(Mjޓt~8'yh|HmWuj;DNu.[p|bmbF׫s(jBN&AH%gC/eA^N#֒]W9lGGA'#< ~>vendstream endobj 653 0 obj << /Filter /FlateDecode /Length 2830 >> stream xZ[o~o( OtrIon@QUhK%E/E9Cu@ϙ 5CR E9g;"K"߇*[<]}yhoWW|OW4]2Ӌ\rj>7i䜥Z3C94yN 7l%<٘=`Z!\)UHJCjHL`iF#tx@.C2JFvcF@d6ҐЪ /^zp0'_]x࿁? P<שШ( ᦥt2-}%n&_' 2 d͍[oߙ6v}$GW:E4eUQn>~܃$GBAU^(P޲: >1pY}eHxgH 6Kgi+b饅Mm] !Gi>LF,؍Ԁ}=iI\Wv<+kؾ|++]8ݢ\vÚm9; u(`>֟,sʵC _)x 4ys6*P;&3 ZGǞw 4H>3s]Z:(8;W I@$s$0j̟@dhQСĐQOyR&%5=1P,Ɗ󒤉Ci^N5:'9%NQb*cb8bJ|q)iLiS2*ؘ_12d΂"}6O$q8*j=ry]`kk S6h1*3=TJDf&15 mzj8vy`ao(@MײyZ,Uu<؀acejTh:"O1\9uQ͇/,&-e k89=;n}KcJQup-rح'GepuP>1(2WΡ]X!H)Axzn9x>0l(cnnlz{$<qs*;bN4_%Iꪼ[ԍoaH̷  b̍3}/<%, g'LI0"pUzCwWĐ|.u? z"H膾9ﴧp^dmQx0?bUݬuw+?';Ipshe{5+;38AI0+4%wAk'?$w0# g ''x(NM=PiɈ0W HrH ɌgH#+eUG6KnH.Jss;ydF'S0=!9*nmFXz1p!+\hcPnؙ  /-+lP1*G4px/)K RFL(( x[ vI ̗1>WFPoȷO-RQP ToL0DDIa)R&[-&(c (!B᧫M<o}ھ7ߍ @c. 
>J!X<Nv۴ߕtMX5޾(0:7w}y ~sJfP<|4E|uyݘT݌aXPtry 4US^m8hVpD8)ٙPy8MZs@a$# Ks9b'ZTp!NnjSwG1 y̑!b‘9RX_QD,ace2&SDHU@fǔs ]6L3435J>bx]~972s-̘˄=_Ŀ)2Ң;4#^&7$n<*76g5['uHR*| 'wpÇx,]*Lhn3dM0:s;r;4\suendstream endobj 654 0 obj << /Filter /FlateDecode /Length 4763 >> stream x[r\GrA\1pǡ#a qMzM{Yu Y#y2OycF1y0/H7ퟛͷ5j7/t1Ő_8:h|aOKY%0}ҥp#2opiF}:qN#|XU£d eUq4& - ǻy}o: ؿe33G;̌k\>:߯/ȓ x&hG6:tlP9m{cvp_c/ٕ<7@3l6wy+is5rcP͈Hq)і ktspfn>a/fLo~᲋&? H>% PCAwi١$~LmLW$WU1DUJm7Zzކ$k"Im,),1ՎYX۬f+XJ"ϵI,%j !II\q <&Mu/.~ԋ; x6ǖxp~hzIUXĸ *ŀ h˜JY=rdHe) %sDDh 6cP?k`N.3pŅ\H8ytTH*\tI8< !TL̚8paQ /%\/*al&_Gh-IaZ6'h1Ψ57bh{f,jmAk ޡc,pAboSF͋ O!%+_`MAʌ=ŒQ3 Q `'JW#Ƚ2dG KkkVo=]"[IPZqF+RmG($G@ &5EJVPvI֚Ð&)ɏROFX zC9i} NHVPSB3dP&Tc^LC⬓HAkmj&6I"T߱Yv!+!eYk, r&HΚwsQC"kH~Q53S%eGm;"y[GR" g(DbXET֚% P[AHi- \Y*YUgy苈P=0iUP*m!4IhuB6tm[vWI xD!ɶ ?4%Т($֪fN4?kJ& }ĠÒ khE\nnc*ZZ RlN(eO(~X=$($ V"%j$)di6$N$vJcH0l0a>\έXc%GCkC IK \`|$ Ŀ Ѳ$q^xGP:Ȩ5ImCVFeIeeZMb{CbŸr/5'E/.`3rZB*:AXS4+0ी7teQUCUu6:#J5] JCY-IY;*GSgb?-Iy[|Ko\*$\j/R(K$$U1ƈoU-j7vz8v%3x%HԮ B"@ٸIU\~DQ5ΞF++GmϬ=BUF%4 saHA %iveD=HM,51X2"eٳhI!YRsI1l]@"w?U<49Y٨pJ͓kb8WAG9R6yfU::8^a*qٵn)6m0YX0BIJk`jO]B5iRiK6qY٩-uq Xm&)o׻2b)Wy5//^20ecuY++HKO0:_QN& =X c?sH zeqH0Cd3VU[V+^D@L-f ,R=+z`cFnJpVs/qLt]kmsIo'cHeJf=ΒxbJ,|a60UZeyTLbd'#]yzs1-= vko%f^zIYzpzCBc%sK.q髪.qB($.d44mA&E%-MRJHHq-w,Ymg/\黮DY& syk4+ʮaٜP㣖$a8Ityg)84G=/ʴsk# ͎0Nh J{i3 }D7k8ۄW,aT^& Y{\A nkH&B(/*~}{8Cș/QC}חE.ʬ2y_#T ȼ,o_JCdVޅWxu4!ǓᏇwG|vHeB>Mnx_ &/_AOOϗWlbk)]1<3GuU. cL2Akajn5ڮoUǖ){i{C[lh&#]|}nr<&2> 2?0ܵqc). 
.eyfxw m36Msby> ep:Z#V!݊njxO a.N|\am64V;Op<3/cfS:[w^w^n}VMl~}{I6ݣq8mfߟ< ~۩X-~Pܦ䰮 ҖcVjG+Ax˙{ЗwO:q C_~ǓdgCJnkeٮ@˾+q6^).SmGZ_n7LΚ}Fa+?NDSL#PҎx+nU>߹C/0os;_:%߼#T=}{^qrB7MWۡi{Ci qy/ͯv ߍ,DDoj2FM+B``!tn?Q@}owZ^omOwϿ8׎ͬ}D.]k[lQ+o=wCD"v6(r̠Owv^uxkI|wanY#xqGnMʕȕ *znw@ Jdsqu+j?~OW?s>,7# Qpo^vdlXչs=?[NK_4kVb>40F}]zAÇ="\_x|fUf~+2!nL$mMR([NޢRSnxppꀶl'z3=/sޭCMUe|N>.Ӓ`U"5L2UxG:9uNj #M[pեG|1_ś1%B ^WOn-L]uv]eʹ*ɑ0JgT/IU0endstream endobj 655 0 obj << /Filter /FlateDecode /Length 2594 >> stream xY_ gprkOC[`isv(P0k{wb{6l(iFIa5EQ$EH҂M:0?r_T0UXjty3{ؔI^(j* +t%/brfF Dɨ%wFl歠ʐ Hí%:]fxEE :$ Eeϋn_3X A]vgNF]2Ya&Ն|^uKr NuS8-S:/7lqRl0Z;䗿K'"96͊my|V"z5;Z)WYCn -.`@qxmlNEq½8&/:FLJy6oW UЂ U+ ܒwJV#M,B )'/Q*8'ö'0P <$@IBR] Knu}FiQ7'.j얆d>!nW`9ԒU}%LiciI;|9 QIx.ok%~wZ\ɷy%`$9>͚J.Q/|8: FPp,l9[B j y;UhXr5ךdͨb9zGpm xe|Z'47}S')*>WdhdVs@IJVh`*#7 &XF0KNH:ǂ{+9.JG+yvZ}8c 8fqgB1ΦHՒ*B\.q'/cl~ϺjzL̓J̶M'3iHTȥ cb4c:'2$?fݙ$6t Jjmi YT%dE\bF5sa1 %L$sA 2CQ#_QD=xƧ?Lǒ]Tۇdu3i4ƣRj_ٗs1 V!|K RMm#;x ) -d) (B27/>cI(V볛pݙ |RǭSa]AM#>kd@35ڠ~1QvE~iοROI$U3QKvq=0Ck&IYa dgMf9rS/ UUSϠ FܑΔ+,gKw?>sk>QƲʩ: p}5>7G)8ѭvh&AVmva3ѯR*NHP0䃧b@~] Hȹǃ~2r7Eg_xtxMۅ`u n;?A Pm2#}ׯ]v)KH{7[ݮa.Cef^a4АKxmOmy\ FJs ^]v@ECl3E QfNpSﷇxw MmMAMF M.=~6loZ~{xN؀ao.潶.dtLrm4XXDNǓbqEx'EpНfO KȄCWX>Eo)Ze/Y`R@εٔe DˌTPPX)ʹjSRqٱ}JP[fleL$x2ib]SHxfKc4)!cTWiwW؃S*<EE1e䘃kYU@ܗJbCu@$29@jbxu#~F0w^(ڇM"c$f\h;s90X"R \<6iRrUo b,X-@P_.ӥ_W)szrYC&1]mZ|U@`c%\k`o`:][?>t847DXu3֔C|{ DLj \%xIcGL4HaYn"})x0|TXe5})#ԺLJΓa=s1ymLUtN@C pB1֑oD" 8p"ǐv>I>l6nb]_36 8-zsּCC~ Քk0+o#d d>[maS_E^9Zh=bsF~K4%h{Z&[e9fgRC=G'$8'L?Onendstream endobj 656 0 obj << /Filter /FlateDecode /Length 2232 >> stream xYmonWF\M4/ %j>N>im3$WKWv.(b ܙR'tcf 5<+#c\m:UXGLeZZ)]yGIAn7Tfsd ofs4e̐uQVJ0W[ oRJx''df (CH-S1#s[N$)\ER˜͵P$ǶlCȟ9csλi6_Q?qv5\R@'V+ڗjeNA<SQƲQh_GLcoW ߜHn}璜a4[Iߢ8_'ڟ7`n]{۹K6CݗClԁ.!i) R?0_6O ʴzKBXCn㜅fCݕf|7.$kC*>["7 \uS[ Evzz*HSR4K6/ёy"isȟMOI#NvW5xhϕ3*SwN)w*㝓t3b;䋟=QurEfl>ޤs"7*G( (#ZE~ # ɸ\Ss΀QK'&?! 
?aGBr>R$3pjs!'mKܧIIꌧ>Lk< 7͝:8 2ȄC%]_ /3Pj t| U/nq3c@S{7B P$Q?}?I7j,u篓8OP!endstream endobj 657 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 284 >> stream xcd`ab`ddM,M)6 JM/I,If!C+XyyX|*=E{"8fFjʢ#c]] iTध_^竧_TSHJHISOSIP v Vp 4TQF ^Ȣ} C_g:OtiQw_>ڢ%3\ҽTq ?oZȶk?s> ==3zz&L?guO}20 plendstream endobj 658 0 obj << /Filter /FlateDecode /Length 2773 >> stream xZKs>濐ʗ@O[IaS;(h*E[̱Dz)jn$@Hme| 43+N sw\m}wFf M)^X*x| LJ)ahE}#*m5u7)%ARD PYοFvBj+5S25}dS$h$9_ά1 se@ΕdF[; ee)aU-`UXb؛7F㚵hƔrK5J)xniK%{Y q0$J$N^ʰ=uc? )Fab5u`+Ox7 %誾<#[B]ˍN.1w Abo'€G S ,g&>Bx=?8Wn⹽_^;Ż[*R{2}ci%٭Z];gOg 0ۀh_Xngxv[AsDA ,R ymb_qE|ʉ: Ӊs]xO+D}UnYQ7m춄NxQ}Cp >H%0:>Bp>F,==?DRʵxD/ΞqGDV3ᖰaǖвRmhi ѹ}W5M( D 7?$.H9dB1еG7GN;nLf$귳يL(=`̑' oHa_8 X ^6RJXfRK:R9c(sLa G4K] d7)-D} :3Zbuw PMhej]z:$gWQs-N@Bܳ` :vdPN!oN 9M`9YpRR]g5SiP$i)DB]v4صgX\2mZVw$xDDJgHzNW}K F49CJA'WPL>SxV%&5Iy#Z<3AS'4Lۅ((w;x"\ZsGؙd<&r"7!o@?Jq#{$3܌n>6MH2QX ήqZyy@ji $Web[Iv1C>=e4ݕT h`킜b~OnZQ%hɠ3RBr82O-ys&2hA!r`yݐѿieA,h(}'$.њa) @0:"O&2Vw!1 rN0)d7_<a?T.EZͱēҥKUu_g*Pr! ض/%ez]A,72@9a)Bٹ{aN,rJX5Rl< 4 <"E6s`"4]m='_@a 7O'V1,SФHԃ3pD@~}|.Sx.L7z27 @C06iKjHrWb#x"puPCJ,b*LrY!(NJ)~6+jCt% ׂ8sE..x|n3AHmM[g"BqWm%/=pߟD>yqA2LsmcqMz| !endstream endobj 659 0 obj << /Filter /FlateDecode /Length 1097 >> stream xV]o6}o`EEÀKvRaAeG-l`$EɤK{xyFǽ E{M6 3B Gf39RQS(l0N(%P)>"NSbeyE`m}$JW9wX hv6B8KI)w>.b#'ƛf>fyB >)x۴~7/!>4U)C{십 E P a|Pmyp?.ocKB$(rHqGAu"zx27}Vd]βDiu'2t]U)Æ Cf4HzT*=/[HPaDoE/U#/<ca){ظlR) ?jjoQWUhew.OLA[!wrUMp8N g c.@hh8ȍ"6o} $]vMՁ8u𞖇l2bшA7A_' ;~D:G@4]sr M)gl ~X~3L uRWvա[b v-|[}[KˣyԎUaOCWs]fӟu}mV_ʢXx{áf]y<++_`)g|Aڼk~R`ꯋ#|m[eZ(hЁUʽYe_*0x dc)=E}N#KKYx {cHwH&0gh][kq'b:^{pZ/rBx鸻Cto6Xj3O;Jm)Fxc,P> stream xYsSŽnLۋK['=<2)Od^$RGIyw H$$b?~[88vף#j?oџohĆg[CǚDl;"BMfplxrl U:1l&LE~ԐIŒ'46$͋?>hu:Jd C*Lhb2BFqȲ"/,VTYosx'000AD15!{`$C M(61ɍ7Ιr8t<2J٩ϲ*M}L_*_f"8d]y)Xʼ87[ SI5}?L VPUD)%oFo:[Fq;*%h, DcL IV(1T+㚡gH N4%KU֑r(gmw#qrm! 
F`J t7rPZFlnv?R34umÉ:.S%䐗UUn: $P':Bɩ} 3TM$[x=y 'NLHӫJ]/phImȷm|MPr|zhB 'T'|5 G.u$!BbaxCهA(jR7律ν$ $cbp6B'XrI}>1ꞏ o4Yӎ菖)( ENT0syˋO ) h:~HP$Ղl&Ð =b /qč!F !̛CHc>旝 n U2$ͥ_azKf=Wc08بf2жj9`B!nr$3eCsvkßӤ=&rČRiO3g dˎ(߭PV|h6LcD@(7&Rֲ \մyOJeįQݴ m~Mu0 ]@hCO-N͗V-Sbڎ#x9ϼoo_8h<@.'P#{ KZOZXZQ1ؔ働mF<&3 ^dgQ$IE'B\*)chw2 k3e@մ!OX*="TAM$.D'H:k:m}*_ N#چ:C0nVuMGHe0 Я/8rl[YZ01 0˺/BM}py YCK s՜~q.̢侉Ǵ%Ȼۦ "J9-ToB\p LEx8R'Gx,JՃlݸ$7۝h!:O`7_jWٷ@ta|GZ7_6m0\'mhSh=V-x~`SPvH۶H퉾ngEkgk2'wИ:9?&})l^VkuM>ˋZY řCSxpl ]>6aWxiA\}wIvɞ*֦imVVף &endstream endobj 661 0 obj << /Filter /FlateDecode /Length 1131 >> stream xV[s8~`ڇI xg3y<$ai`ա5#n6v;8c \ lOl,'RO >l~4:o3t\n_ az4QͤŤCdb3'8@Qx tnֺnc҅h)L ZyԽQU$O%mEܜfq/ms/L_9J3J&獇׵ &GH% KOZro!|p,2Wy0Td=j9ږu Z'FST^D<(5ͳ.I ʧpz$Ϸ"]'p碕ceeR%yj2]LgڭݩhbG0es1:T"3ECoyD5ܡ`n8D?J!Mth W8oGE醎K>8r9AٔRuK VX=T vPwwRO4p5)l9 #@@Ek5CQ^ v[Lv&ZIUW1x1L;:pdS۟ڎFt:Kk+Wo+L?3>[ɦ}su11ʨWju[Wڈo3Whu`ݛh-B@Cl 4Ҳ6]RlUMʋ9}0v> stream xKsߐƓNTb&tfn$u-6tHZ~xD$~c3?.aǂۥo\~]2Yn, E)& 9KK j"W^VTf >1 gX`ʦ >"#Yx6O1%^mIbNyެ6 cJ4‰1g+(KagA)Мsa匉n)!1̆F 6Ld \mS]uA_Cl~n=߬ݥf螫pUenvLpnbu|۝ wމӼGCU9Smp_OڐnYÓ[O fJ?w̋"Fgrw.1Ƌh%T!Cjmy~ֆ1¡]V˗:pJ%njĒNeeҤ@Cٷ0OR\`\`c_C36]xIsqLdDL?ٜpd%ҭ)Jղ ޛhI"E4T_TstH<`~~ T3CZBB^&qξ&VI >Nz "qz5LԋNWF;'dಉqd۽!2+@s<`~JPACSZ'< x%bvҘX*x7fͦa %Мk`9퍃bѩo J32!^y }6]4iֻ.>4fƁFsi9~sSɗ&nd_/Z)[M un-tq$%+[sȕOE i8'f٦l{)7xYȣț!'ьPvB7ؐv89|ysSɏl 2f#fh`+/v&7P Lh.@-)( Qc=>v+k/8hߵMUPo]?3k(8+ \*w vcz"˩Tb;[p6@"楡h>m5,][2U3z/k B}ߴu;P2*cԴnoaՉ~="Nq:8kd۝ k3+G^^U,q蠧C!Rc#b7Nrd_40 qA'XDX]TLB0)c2'2VL@`BO@!Ǚ䠧 OO~_ó/oʀ J;gA +c_jLYc }Θ\ c*ExLG*"0t$dKȤ*]Cj!/aM>ZC' !n/iendstream endobj 663 0 obj << /Filter /FlateDecode /Length 1228 >> stream xVnF}WA.[h IZuP4Ka끑(D*$mW+J3gggdw]l'xz?DLd!Si&*24Nn/٫,I1 6h*j$rðhtƠj)f6bi,Ds c~+Ž3a&uvh *66^$GA:V0Ph aF{PM80cΔɸ ,vY1R9LdZJwnlrp@ G+ #+˴J:CX&f SQbPd k#kA#fzhU,E$ k}I#ݔpp N im5Uݿ/7b:"Ƽ,ioOZ7ҷHrX)vdKpPOސ][.E_=o񡯚=E.՛bݒ+|vNxPI1P-~XNח$-DrBM4VMߍDXm5r<:V"bsJb5 oqZkl/TTAe(HOYeuΗi9?^Cdece-Ob]TU2h[kh P +_s(ܸ faCz'˲+H@Qu/ .Nb (2E'#t3X#(=uSwPϲmi gHk\UuC9[2vx-> stream xXYo7~ׯCR>@Hk6ڕUV+;!~3Ә٘owӈqgr9zy/ 
Ɩ2s'r3"JN.Rd[jl㄃54ǯ sdsd5saԑޞF7sTzs&KeϏm5KK^MfR9D_]2-itHc>Ȍ rNs!{ARsF6'\D]g&"x&j>EWCr9nx\I-͡|Qθ#v0AfR:7ٜSM 1⡏]~kTZ-7EZs+?GD 3F SBP;|JEDb۲h7m۴ \MM >S&WlzL.0)jQ }l .esZZ&GL1Nq3Rn)gs~\4e]VE붫ڻb l&ۑL(q2EsN}nͶ5{_tmpϦK[ .8>+C/scY2+ PZh S_w1yp c9пL2gL4Ϣ}A }<ɐ. Hi`;f T +\ ,k-B1:- ,CaBU8F T˞usXUӔy<ʁp/P^^p%1) k^t]ރ>^VUcx_Wқxj.הE8eV7îhMƐ1C$ގOY\ n|RsW猄pܟ$YI;nF+lvA=xrqzX=D 9H:)S~Yd9~b5*&ÀL45`XͦǢP2Vi "Yٓ*+]sHyX ʅA})BP(j秗1A߉Q^XI>7iѸˆ[E,\r?kC_> ei{Fytzzx֗ndpp~wx ܱa=]r]`oh7({6I|Cdk}o xӆ]()_K.'[{zڷSV)vOHVja$/u> stream xWnF}WA [}8A[qR7m%Ka(D*蝅䌤8vDd͟oFx|=0"t蛷 8J5d<]NSX j&i2aZS.)L6PCIT$sZXQQ9o>2i(*)Wŵea)&EڟdF$c(_盈{E<zKE@[Zfcg=xgTnB?c)rm>]/M8' i&Du}D`!Xk1xTFX*KeL8 ,ԲMURӞxBR);] ⒡o^3/?XfRŐYk6Mh`[6W\@2A3*,8 ~A #u|aĐoNcкe7,lW=߀1\`b6oMtPI rH܀^͏wۢf> ̣!E7_/%yN,4zj$6jj^$Fd(ڠ@ZxU؀SzXFA  _X S0 6bz2psk%MrWpMٲpEEGr(gFq:&on> /w`WE}WF'AMo'Cΰt Ԅy҃LtʿN&D1nTP|> =Ykls+xjmY 3"#oՆ{!eK&T-u;* GR &rb9:pޒYO Du;A9/ZϔGH-.cRBɛ ze{TA&MBtafە4e"GUttpZU/cf崲  ؉Nqfw7[% :Jy܁a0L"qX 6gmHxX2Z. /]jJ4f,J 7Uq74ڶMY7 T<̺\6i5\24]לzR {-so'wݨԀOƭloܸq.߻u\e4k!dje}+Ч¦h Ulz}3uab14WWGśWŮPF\v ~Ѯ $O6gFo|Y𿄥kgߦ&**yO@io2<.MQʺÂ(wh!6 wg+<{bYVbpϻdg^۲v-8{~l9="u<>Pݪʶ߮LH6Nh7ߚ&p`\:=> \YЙpRe Ld*)pDc~t$?5 0-XHl1Ś0g\FՐP,8E22UEG "q2X<9w3}endstream endobj 666 0 obj << /Filter /FlateDecode /Length 1487 >> stream xWmo6_aF&JYk Q5Hrߑ"%RH,wsǁǁm> ƻձ?W> %?b<^ݍ<8S>^#$tEJ3H34V5ʓaJ@+ cTO_D1GP1m$LJdN=#,BO=ƸZYUR<‘Po5QJ]Ą:5A&%ЇbL 8ؖ(܈šG SP4/.L]Q6s19cVF_t\^΃Fc\ -sܙύ.Z(Y΃tc‘@!^FC=AdY,Ԭ1d<3nTVo* Q"UfY‧Ί6Q}:ڊPB*"F2gXg"3(ҰG1:`+^ԜZh}jXS> AΔ"dP> Y{}k{ ǰ_!H/<;cʲ*G*<k8aJ\M;̀*o5b4m2-k5&nd wOr:8̈CK F TZ. 
c1ں뷗cQU>T'uZ]x \Nf%-&{nj.www:l%77h>Px&T1T2Zs9]o/rAг e4ö GLu# Bnie.l}V՛^$gꠞw{gZ7|+0ok|b0]+dҶңA;tyAqb|לL2våҵ!}*Ou{>u`R26",?0H JAjFtL:K'3Y0[8lC22LQy 3>xKN)S9AřMG"Mg(&"E-7B5yΕK؍PYT #')a<#8T"4& BoٌcϺHbbۡSۿЌ #0ԐwQb jn#<>%÷FjV2s#C݌ҨA5?ڂeqX brh35Eºv6iy/꤬3{i ~2 ^Ah{c:W 4gXk>*}ܠvdUs`<ԇ?$ 3xsl6 x` B6|d?g^;l}:Pί@ `@-I ՕwnG.wJwGc ڲFSQV,ҍMNb(e:~297XT \Bya{~CaԌW?pN1endstream endobj 667 0 obj << /Filter /FlateDecode /Length 1705 >> stream xYK6{0KCb]`ӤEEQ@kkwJ#IwCJ2Gv.<8x>1|Ӳ׽7=bfͿi<}\OE +],铘F~“H119:1)EG`AHa( BW sffO,R(+i4$GȕfΝk"L<㨛+ 1iqOStLFAO:)3svFm'{1&E2p1,QQY ^8&H c5Gqp/Zvy36 1$hewI"-PIpFGu|a?8ZT 2ѺH뼑(JuÂ%3,%*+sElDا.Q/[]2guC5 Fկ(:#StiL_VE"WFP!:kq}@q?z= :m ^{,&lQB`*,n!9!QU?ӛ4Ov!FCr&-._gӺYLLH^il$eQ&,VZzw֍l?eZWcj}pm"b=ˢn+ы?jg6/2Vދ#&ނ8zأG J p[zt!cUchmVEL˺$z"wZ+vd+K_F^f-1\{|0hwyQXJIap ErK. bQ9N] =Ng"1HEb#mpD:ZŨ[E6]C+"4+1㫳p  Z7pאC c ;8C\60ЁTvΜ˳вN̫Ż!? h0.h 8>u{8#/B%{٫eV+wp3m|jWj_MtP}{qθD$a~}-C2r9ľ0L;o\ .`NACNK^6wPY^`֘58X*qpZm~NLu<=G9-hU#HB*Hӆ 5Q͆0BT١e2u[yC`an*h;zu/m~MV4v_g}?wnxeQSTȑM_DtR>aM xwsEN],#n#v6I|Yyg/uٸHQ'OI]Mt0ŷ{ɇz#p$;\eo&Zk@] mI'$6tpaendstream endobj 668 0 obj << /Filter /FlateDecode /Length 802 >> stream xeUr0 )X.3(m'"1'-Y#Je@ (ʨpV%%m\[Ư宼?TEuJ&9QԕF*]q7T !8q]E U0Zpjx~ ,is-ӓf.b.&iPDJ5|mAB,Nige}J!]O"UN8F8\2-_L];+d\]n~'|f\q})$5rys|2OΐWG]|XM~+쁨,M3Əmיc7ĦʟfovRñ ZyLZ6+N%үuP]y&x["{9>+3Q+s ~hn76Hh5]zin>3y`:aOp~}*>Pzœ> stream xWMo6G>THJTN",]; ۑRVQ{@(m  ~p8`:"Q7[/(0s6jm 2ᜉt#Kp'hL(}G(/ @H̦1t4f4~XiZJ$ -O.1 Tk\gxb DC.^ ,QDݴ„u(Gխ ZP(]6z Vp~y0(zrԃ- ԯP- Uhpʂ8QaZmNzg BꆆԵ$q]۳i6of_c9wB~;ϵ3xW;Z(զwE9 Q?؂ PgF=BVUm͡!A@,+&e3g%Nw~m7MK!#=ƭ/NУIfx^K'Iƥe9Y.ofU#.)B:uxڿAPP=%^; [n<?2u˜pHQ^j5)'C4$t_6g߻qN0bŽ۪`I֩(UP//ȁDGORendstream endobj 670 0 obj << /Type /XRef /Length 501 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 671 /ID [<3bd9cfd0b9f8e131d58d6ad60a01fddd>] >> stream xKqǿuM+"A<"b^t(2C'Oބn)bv=I:t $1[{ >;xx1󄯉m ZFxZ:,ަ7O,o #oq*gs60)&x"6;g~ގtw>7g;lʮrh2xoXEs5SGbEˍzBMqMd endstream endobj startxref 382655 %%EOF 
markovchain/inst/doc/higher_order_markov_chains.Rmd0000644000176200001440000003006615137702633022323 0ustar liggesusers--- title: "Higher order Markov chains" pagetitle: "Higher order Markov chains" output: rmarkdown::html_vignette author: - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D FCAS FSA CSPA C.Stat, Unipol Group address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedicato\_giorgio@yahoo.it} preamble: > \author{Deepak Yadav, Tae Seung Kang, Giorgio Alfredo Spedicato} \usepackage{graphicx} \usepackage{amsmath} \usepackage{tabularx} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} abstract: | The \pkg{markovchain} package contains functions to fit higher (possibly) multivariate order Markov chains. The functions are shown as well as simple examples. vignette: > %\VignetteIndexEntry{Higher order Markov chains} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} keywords: plain: [Higher order Markov chains] formatted: [Higher order Markov chains] bibliography: markovchainBiblio.bib --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) ``` # Higher Order Markov Chains Continuous time Markov chains are discussed in the CTMC vignette which is a part of the package. An experimental `fitHigherOrder` function has been written in order to fit a higher order Markov chain ([@ching2013higher, @ching2008higher]). `fitHigherOrder` takes two inputs 1. sequence: a categorical data sequence. 2. order: order of Markov chain to fit with default value 2. 
The output will be a `list` which consists of 1. lambda: model parameter(s). 2. Q: a list of transition matrices. $Q_i$ is the $ith$ step transition matrix stored column-wise. 3. X: frequency probability vector of the given sequence. Its quadratic programming problem is solved using `solnp` function of the Rsolnp package [@pkg:Rsolnp]. ```{r setup, include=FALSE} knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, comment = "#>") ``` ```{r setup_2, include=FALSE, message=FALSE, echo=FALSE} require(markovchain) ``` ```{r higherOrder} if (requireNamespace("Rsolnp", quietly = TRUE)) { library(Rsolnp) data(rain) fitHigherOrder(rain$rain, 2) fitHigherOrder(rain$rain, 3) } ``` # Higher Order Multivariate Markov Chains ## Introduction HOMMC model is used for modeling behaviour of multiple categorical sequences generated by similar sources. The main reference is [@ching2008higher]. Assume that there are s categorical sequences and each has possible states in M. In nth order MMC the state probability distribution of the jth sequence at time $t = r + 1$ depend on the state probability distribution of all the sequences (including itself) at times $t = r, r - 1, ..., r - n + 1$. \[ x_{r+1}^{(j)} = \sum_{k=1}^{s}\sum_{h=1}^{n}\lambda_{jk}^{(h)}P_{h}^{(jk)}x_{r-h+1}^{(k)}, j = 1, 2, ..., s, r = n-1, n, ... \] with initial distribution $x_{0}^{(k)}, x_{1}^{(k)}, ... , x_{n-1}^{(k)} (k = 1, 2, ... , s)$. Here \[ \lambda _{jk}^{(h)} \geq 0, 1\leq j, k\leq s, 1\leq h\leq n \enspace and \enspace \sum_{k=1}^{s}\sum_{h=1}^{n} \lambda_{jk}^{(h)} = 1, j = 1, 2, 3, ... , s. \] Now we will see the simpler representation of the model which will help us understand the result of `fitHighOrderMultivarMC` method. \vspace{5mm} Let $X_{r}^{(j)} = ((x_{r}^{(j)})^{T}, (x_{r-1}^{(j)})^{T}, ..., (x_{r-n+1}^{(j)})^{T})^{T} for \enspace j = 1, 2, 3, ... 
, s.$ Then \vspace{5mm} \[ \begin{pmatrix} X_{r+1}^{(1)}\\ X_{r+1}^{(2)}\\ .\\ .\\ .\\ X_{r+1}^{(s)} \end{pmatrix} = \begin{pmatrix} B^{11}& B^{12}& .& .& B^{1s}& \\ B^{21}& B^{22}& .& .& B^{2s}& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ B^{s1}& B^{s2}& .& .& B^{ss}& \\ \end{pmatrix} \begin{pmatrix} X_{r}^{(1)}\\ X_{r}^{(2)}\\ .\\ .\\ .\\ X_{r}^{(s)} \end{pmatrix} \textrm{where} \] \[B^{ii} = \begin{pmatrix} \lambda _{ii}^{(1)}P_{1}^{(ii)}& \lambda _{ii}^{(2)}P_{2}^{(ii)}& .& .& \lambda _{ii}^{(n)}P_{n}^{(ii)}& \\ I& 0& .& .& 0& \\ 0& I& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& I& 0& \end{pmatrix}_{mn*mn} \textrm{and} \] \vspace{5mm} \[ B^{ij} = \begin{pmatrix} \lambda _{ij}^{(1)}P_{1}^{(ij)}& \lambda _{ij}^{(2)}P_{2}^{(ij)}& .& .& \lambda _{ij}^{(n)}P_{n}^{(ij)}& \\ 0& 0& .& .& 0& \\ 0& 0& .& .& 0& \\ .& .& .& .& .& \\ .& .& .& .& .& \\ 0& .& .& 0& 0& \end{pmatrix}_{mn*mn} \textrm{when } i\neq j. \] \vspace{5mm} ## Representation of parameters in the code $P_{h}^{(ij)}$ is represented as $Ph(i,j)$ and $\lambda _{ij}^{(h)}$ as Lambdah(i,j). For example: $P_{2}^{(13)}$ as $P2(1,3)$ and $\lambda _{45}^{(3)}$ as Lambda3(4,5). ## Definition of HOMMC class ```{r hommcObject} showClass("hommc") ``` Any element of `hommc` class is comprised by following slots: 1. states: a character vector, listing the states for which transition probabilities are defined. 2. byrow: a logical element, indicating whether transition probabilities are shown by row or by column. 3. order: order of Multivariate Markov chain. 4. P: an array of all transition matrices. 5. Lambda: a vector to store the weightage of each transition matrix. 6. 
name: optional character element to name the HOMMC ## How to create an object of class HOMMC ```{r hommcCreate} states <- c('a', 'b') P <- array(dim = c(2, 2, 4), dimnames = list(states, states)) P[ , , 1] <- matrix(c(1/3, 2/3, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 2] <- matrix(c(0, 1, 1, 0), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 3] <- matrix(c(2/3, 1/3, 0, 1), byrow = FALSE, nrow = 2, ncol = 2) P[ , , 4] <- matrix(c(1/2, 1/2, 1/2, 1/2), byrow = FALSE, nrow = 2, ncol = 2) Lambda <- c(.8, .2, .3, .7) hob <- new("hommc", order = 1, Lambda = Lambda, P = P, states = states, byrow = FALSE, name = "FOMMC") hob ``` ## Fit HOMMC `fitHighOrderMultivarMC` method is available to fit HOMMC. Below are the 3 parameters of this method. 1. seqMat: a character matrix or a data frame, each column represents a categorical sequence. 2. order: order of Multivariate Markov chain. Default is 2. 3. Norm: Norm to be used. Default is 2. # A Marketing Example We tried to replicate the example found in [@ching2008higher] for an application of HOMMC. A soft-drink company in Hong Kong is facing an in-house problem of production planning and inventory control. A pressing issue is the storage space of its central warehouse, which often finds itself in the state of overflow or near capacity. The company is thus in urgent need to study the interplay between the storage space requirement and the overall growing sales demand. The product can be classified into six possible states (1, 2, 3, 4, 5, 6) according to their sales volumes. All products are labeled as 1 = no sales volume, 2 = very slow-moving (very low sales volume), 3 = slow-moving, 4 = standard, 5 = fast-moving or 6 = very fast-moving (very high sales volume). Such labels are useful from both marketing and production planning points of view. The data is contained in the `sales` object. 
```{r hommsales} data(sales) head(sales) ``` The company would also like to predict sales demand for an important customer in order to minimize its inventory build-up. More importantly, the company can understand the sales pattern of this customer and then develop a marketing strategy to deal with this customer. The data record the customer's sales demand sequences of five important products of the company for a year. We expect sales demand sequences generated by the same customer to be correlated to each other. Therefore by exploring these relationships, one can obtain a better higher-order multivariate Markov model for such demand sequences, hence obtain better prediction rules. In the [@ching2008higher] application, they choose the order arbitrarily to be eight, i.e., n = 8. We first estimate all the transition probability matrices $P_{h}^{(ij)}$ and we also have the estimates of the stationary probability distributions of the five products: $\widehat{\boldsymbol{x}}^{(1)} = \begin{pmatrix} 0.0818& 0.4052& 0.0483& 0.0335& 0.0037& 0.4275 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(2)} = \begin{pmatrix} 0.3680& 0.1970& 0.0335& 0.0000& 0.0037& 0.3978 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(3)} = \begin{pmatrix} 0.1450& 0.2045& 0.0186& 0.0000& 0.0037& 0.6283 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(4)} = \begin{pmatrix} 0.0000& 0.3569& 0.1338& 0.1896& 0.0632& 0.2565 \end{pmatrix}^{\boldsymbol{T}}$ $\widehat{\boldsymbol{x}}^{(5)} = \begin{pmatrix} 0.0000& 0.3569& 0.1227& 0.2268& 0.0520& 0.2416 \end{pmatrix}^{\boldsymbol{T}}$ By solving the corresponding linear programming problems, we obtain the following higher-order multivariate Markov chain model: \vspace{3mm} $\boldsymbol{x}_{r+1}^{(1)} = \boldsymbol{P}_{1}^{(12)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(2)} = 0.6364\boldsymbol{P}_{1}^{(22)}\boldsymbol{x}_{r}^{(2)} + 0.3636\boldsymbol{P}_{3}^{(22)}\boldsymbol{x}_{r}^{(2)}$ $\boldsymbol{x}_{r+1}^{(3)} = 
\boldsymbol{P}_{1}^{(35)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(4)} = 0.2994\boldsymbol{P}_{8}^{(42)}\boldsymbol{x}_{r}^{(2)} + 0.4324\boldsymbol{P}_{1}^{(45)}\boldsymbol{x}_{r}^{(5)} + 0.2681\boldsymbol{P}_{2}^{(45)}\boldsymbol{x}_{r}^{(5)}$ $\boldsymbol{x}_{r+1}^{(5)} = 0.2718\boldsymbol{P}_{8}^{(52)}\boldsymbol{x}_{r}^{(2)} + 0.6738\boldsymbol{P}_{1}^{(54)}\boldsymbol{x}_{r}^{(4)} + 0.0544\boldsymbol{P}_{2}^{(55)}\boldsymbol{x}_{r}^{(5)}$ \vspace{3mm} According to the constructed 8th order multivariate Markov model, Products A and B are closely related. In particular, the sales demand of Product A depends strongly on Product B. The main reason is that the chemical nature of Products A and B is the same, but they have different packaging for marketing purposes. Moreover, Products B, C, D and E are closely related. Similarly, products C and E have the same product flavor, but different packaging. In this model, it is interesting to note that both Products D and E depend on Product B at order 8; such a relationship can hardly be obtained with a conventional Markov model owing to its huge number of parameters. The results show that the higher-order multivariate Markov model is quite effective for analyzing the relationships among sales demands. ```{r hommcFit, warning = FALSE, message = FALSE} # fit 8th order multivariate markov chain if (requireNamespace("Rsolnp", quietly = TRUE)) { object <- fitHighOrderMultivarMC(sales, order = 8, Norm = 2) } ``` We choose to show only results shown in the paper. We see that $\lambda$ values are quite close, but not equal, to those shown in the original paper. 
```{r result, echo = FALSE} if (requireNamespace("Rsolnp", quietly = TRUE)) { i <- c(1, 2, 2, 3, 4, 4, 4, 5, 5, 5) j <- c(2, 2, 2, 5, 2, 5, 5, 2, 4, 5) k <- c(1, 1, 3, 1, 8, 1, 2, 8, 1, 2) if(object@byrow == TRUE) { direction <- "(by rows)" } else { direction <- "(by cols)" } cat("Order of multivariate markov chain =", object@order, "\n") cat("states =", object@states, "\n") cat("\n") cat("List of Lambda's and the corresponding transition matrix", direction,":\n") for(p in 1:10) { t <- 8*5*(i[p]-1) + (j[p]-1)*8 cat("Lambda", k[p], "(", i[p], ",", j[p], ") : ", object@Lambda[t+k[p]],"\n", sep = "") cat("P", k[p], "(", i[p], ",", j[p], ") : \n", sep = "") print(object@P[, , t+k[p]]) cat("\n") } } else { print("package Rsolnp unavailable") } ``` # References markovchain/inst/doc/gsoc_2017_additions.R0000644000176200001440000001276315137710446020104 0ustar liggesusers## ----setup, include=FALSE----------------------------------------------------- knitr::opts_chunk$set(echo = TRUE, collapse = TRUE, comment = "#>") ## ----setup_2, include=FALSE--------------------------------------------------- require(markovchain) ## ----message = FALSE---------------------------------------------------------- library(markovchain) states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ## ----------------------------------------------------------------------------- ExpectedTime(ctmc,1,4) ## ----------------------------------------------------------------------------- probabilityatT(ctmc,1) ## ----------------------------------------------------------------------------- probabilityatT(ctmc,1,1) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow 
= 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ## ----------------------------------------------------------------------------- plot(molecularCTMC) ## ----------------------------------------------------------------------------- if(requireNamespace(package='ctmcd', quietly = TRUE)) { plot(molecularCTMC,package = "diagram") } else { print("diagram package unavailable") } ## ----------------------------------------------------------------------------- states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) ## ----------------------------------------------------------------------------- if(requireNamespace(package='ctmcd', quietly = TRUE)) { sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) } else { print('ctmcd unavailable') } ## ----eval=FALSE--------------------------------------------------------------- # transMatr <- matrix(c(0,0,0,1,0.5,0.5,0,0,0,0,0.5,0,0,0,0,0,0.2,0.4,0,0,0,0.8,0.6,0,0.5),nrow = 5) # object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr, name="simpleMc") # committorAB(object,c(5),c(3)) ## ----------------------------------------------------------------------------- statesNames <- c("a", "b", "c") testmarkov <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0.5, 0.1, 0.4, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) ## ----------------------------------------------------------------------------- 
firstPassageMultiple(testmarkov,"a",c("b","c"),4) ## ----------------------------------------------------------------------------- transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") ## ----------------------------------------------------------------------------- transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) ## ----------------------------------------------------------------------------- if (requireNamespace("Rsolnp", quietly = TRUE)) { statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") predictHommc(ob,3) } else { print("Rsolnp unavailable") } ## ----------------------------------------------------------------------------- energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") 
is.TimeReversible(molecularCTMC) markovchain/inst/doc/an_introduction_to_markovchain_package.Rmd0000644000176200001440000032063515137702633024720 0ustar liggesusers--- title: plain: "The markovchain Package: A Package for Easily Handling Discrete Markov Chains in R" formatted: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}" short: "\\pkg{markovchain} package: discrete Markov chains in \\proglang{R}" pagetitle: "The \\pkg{markovchain} Package: A Package for Easily Handling Discrete Markov Chains in \\proglang{R}" author: - name: "Giorgio Alfredo Spedicato" affiliation: Ph.D C.Stat FCAS, FSA, CSPA Unipol Group address: > Via Firenze 11 Paderno Dugnano 20037 Italy email: \email{spedygiorgio@gmail.com} url: www.statisticaladvisor.com - name: "Tae Seung Kang" affiliation: Ph.D student, Computer \& Information Science \& Engineering address: > University of Florida Gainesville, FL, USA email: \email{tskang3@gmail.com} - name: "Sai Bhargav Yalamanchi" affiliation: B-Tech student, Electrical Engineering address: > Indian Institute of Technology, Bombay Mumbai - 400 076, India email: \email{bhargavcoolboy@gmail.com} - name: "Deepak Yadav" affiliation: B-Tech student, Computer Science and Engineering address: > Indian Institute of Technology, Varanasi Uttar Pradesh - 221 005, India email: \email{deepakyadav.iitbhu@gmail.com} - name: "Ignacio Cordón" affiliation: Software Engineer address: > Madrid (Madrid), Spain email: \email{nacho.cordon.castillo@gmail.com} preamble: > \author{\small{Giorgio Alfredo Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi, Deepak Yadav, Ignacio Cordón}} \Plainauthor{G.A. Spedicato, T.S. Kang, S.B. Yalamanchi, D. Yadav, I. 
Cordón} \usepackage{graphicx} \usepackage{amsmath} \usepackage{longtable} \usepackage{booktabs} \setkeys{Gin}{width=0.8\textwidth} \usepackage{amsfonts} abstract: | The \pkg{markovchain} package aims to fill a gap within the \proglang{R} framework providing S4 classes and methods for easily handling discrete time Markov chains, homogeneous and simple inhomogeneous ones as well as continuous time Markov chains. The S4 classes for handling and analysing discrete and continuous time Markov chains are presented, as well as functions and method for performing probabilistic and statistical analysis. Finally, some examples in which the package's functions are applied to Economics, Finance and Natural Sciences topics are shown. output: if (rmarkdown::pandoc_version() < "2.7") function(...) { rmarkdown::pdf_document(template = "./template.tex", ...) } else function(...) { bookdown::pdf_book(base_format = rticles::jss_article, ...) } vignette: > %\VignetteIndexEntry{An introduction to markovchain package} %\VignetteEngine{knitr::rmarkdown} %VignetteEncoding{UTF-8} keywords: plain: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] formatted: [discrete time Markov chains, continuous time Markov chains, transition matrices, communicating classes, periodicity, first passage time, stationary distributions] documentclass: jss classoption: nojss bibliography: markovchainBiblio.bib pkgdown: as_is: true extension: pdf --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=6, out.width = "70%") set.seed(123) library(knitr) hook_output <- knit_hooks$get("output") knit_hooks$set(output = function(x, options) { lines <- options$output.lines if (is.null(lines)) { return(hook_output(x, options)) # pass to default hook } x <- unlist(strsplit(x, "\n")) more <- "..." 
if (length(lines)==1) { # first n lines if (length(x) > lines) { # truncate the output, but add .... x <- c(head(x, lines), more) } } else { x <- c(more, x[lines], more) } # paste these lines together x <- paste(c(x, ""), collapse = "\n") hook_output(x, options) }) ``` # Introduction Markov chains represent a class of stochastic processes of great interest for the wide spectrum of practical applications. In particular, discrete time Markov chains (DTMC) permit to model the transition probabilities between discrete states by the aid of matrices.Various \proglang{R} packages deal with models that are based on Markov chains: * \pkg{msm} [@msmR] handles Multi-State Models for panel data. * \pkg{mcmcR} [@mcmcR] implements Monte Carlo Markov Chain approach. * \pkg{hmm} [@hmmR] fits hidden Markov models with covariates. * \pkg{mstate} fits `Multi-State Models based on Markov chains for survival analysis [@mstateR]. Nevertheless, the \proglang{R} statistical environment [@rSoftware] seems to lack a simple package that coherently defines S4 classes for discrete Markov chains and allows to perform probabilistic analysis, statistical inference and applications. For the sake of completeness, \pkg{markovchain} is the second package specifically dedicated to DTMC analysis, being \pkg{DTMCPack} [@DTMCPackR] the first one. Notwithstanding, \pkg{markovchain} package [@pkg:markovchain] aims to offer more flexibility in handling DTMC than other existing solutions, providing S4 classes for both homogeneous and semi-homogeneous Markov chains as well as methods suited to perform statistical and probabilistic analysis. The \pkg{markovchain} package depends on the following \proglang{R} packages: \pkg{expm} [@expmR] to perform efficient matrices powers; \pkg{igraph} [@pkg:igraph] to perform pretty plotting of `markovchain` objects and \pkg{matlab} [@pkg:matlab], that contains functions for matrix management and calculations that emulate those within \proglang{MATLAB} environment. 
Moreover, other scientific softwares provide functions specifically designed to analyze DTMC, as \proglang{Mathematica} 9 [@mathematica9]. The paper is structured as follows: Section \@ref(sec:mathematics) briefly reviews mathematics and definitions regarding DTMC, Section \@ref(sec:structure) discusses how to handle and manage Markov chain objects within the package, Section \@ref(sec:probability) and Section \@ref(sec:statistics) show how to perform probabilistic and statistical modelling, while Section \@ref(sec:applications) presents some applied examples from various fields analyzed by means of the \pkg{markovchain} package. # Review of core mathematical concepts {#sec:mathematics} ## General Definitions A DTMC is a sequence of random variables $X_{1},\: X_{2}\: ,\ldots,\:X_{n},\ldots$ characterized by the Markov property (also known as memoryless property, see Equation \ref{eq:markovProp}). The Markov property states that the distribution of the forthcoming state $X_{n+1}$ depends only on the current state $X_{n}$ and doesn't depend on the previous ones $X_{n-1},\: X_{n-2},\ldots,\: X_{1}$. \begin{equation} Pr\left(X_{n+1}=x_{n+1}\left|X_{1}=x_{1},X_{2}=x_{2,}...,X_{n}=x_{n}\right.\right)=Pr\left(X_{n+1}=x_{n+1}\left|X_{n}=x_{n}\right.\right). \label{eq:markovProp} \end{equation} The set of possible states $S=\left\{ s_{1},s_{2},...,s_{r}\right\}$ of $X_{n}$ can be finite or countable and it is named the state space of the chain. The chain moves from one state to another (this change is named either 'transition' or 'step') and the probability $p_{ij}$ to move from state $s_{i}$ to state $s_{j}$ in one step is named transition probability: \begin{equation} p_{ij}=Pr\left(X_{1}=s_{j}\left|X_{0}=s_{i}\right.\right). \label{eq:trProp} \end{equation} The probability of moving from state $i$ to $j$ in $n$ steps is denoted by $p_{ij}^{(n)}=Pr\left(X_{n}=s_{j}\left|X_{0}=s_{i}\right.\right)$. 
A DTMC is called time-homogeneous if the property shown in Equation \ref{eq:mcHom} holds. Time homogeneity implies no change in the underlying transition probabilities as time goes on. \begin{equation} Pr\left(X_{n+1}=s_{j}\left|X_{n}=s_{i}\right.\right)=Pr\left(X_{n}=s_{j}\left|X_{n-1}=s_{i}\right.\right). \label{eq:mcHom} \end{equation} If the Markov chain is time-homogeneous, then $p_{ij}=Pr\left(X_{k+1}=s_{j}\left|X_{k}=s_{i}\right.\right)$ and \newline $p_{ij}^{(n)}=Pr\left(X_{n+k}=s_{j}\left|X_{k}=s_{i}\right.\right)$, where $k>0$. The probability distribution of transitions from one state to another can be represented into a transition matrix $P=(p_{ij})_{i,j}$, where each element of position $(i,j)$ represents the transition probability $p_{ij}$. E.g., if $r=3$ the transition matrix $P$ is shown in Equation \ref{eq:trPropEx} \begin{equation} P=\left[\begin{array}{ccc} p_{11} & p_{12} & p_{13}\\ p_{21} & p_{22} & p_{23}\\ p_{31} & p_{32} & p_{33} \end{array}\right]. \label{eq:trPropEx} \end{equation} The distribution over the states can be written in the form of a stochastic row vector $x$ (the term stochastic means that $\sum_{i}x_{i}=1, x_{i} \geq 0$): e.g., if the current state of $x$ is $s_{2}$, $x=\left(0\:1\:0\right)$. As a consequence, the relation between $x^{(1)}$ and $x^{(0)}$ is $x^{(1)}=x^{(0)}P$ and, recursively, we get $x^{(2)}=x^{(0)}P^{2}$ and $x^{(n)}=x^{(0)}P^{n},\, n>0$. DTMC are explained in most theory books on stochastic processes, see \cite{bremaud1999discrete} and \cite{dobrow2016introduction} for example. Valuable references online available are: \cite{konstantopoulos2009markov}, \cite{probBook} and \cite{bardPpt}. ## Properties and classification of states {#sec:properties} A state $s_{j}$ is said accessible from state $s_{i}$ (written $s_{i}\rightarrow s_{j}$) if a system starting in state $s_{i}$ has a positive probability to reach the state $s_{j}$ at a certain point, i.e., $\exists n>0:\: p_{ij}^{n}>0$. 
If both $s_{i}\rightarrow s_{j}$ and $s_{j}\rightarrow s_{i}$, then $s_{i}$ and $s_{j}$ are said to communicate. A communicating class is defined to be a set of states that communicate. A DTMC can be composed by one or more communicating classes. If the DTMC is composed by only one communicating class (i.e., if all states in the chain communicate), then it is said irreducible. A communicating class is said to be closed if no states outside of the class can be reached from any state inside it. If $p_{ii}=1$, $s_{i}$ is defined as absorbing state: an absorbing state corresponds to a closed communicating class composed by one state only. The canonical form of a DTMC transition matrix is a matrix having a block form, where the closed communicating classes are shown at the beginning of the diagonal matrix. A state $s_{i}$ has period $k_{i}$ if any return to state $s_{i}$ must occur in multiplies of $k_{i}$ steps, that is $k_{i}=gcd\left\{ n:Pr\left(X_{n}=s_{i}\left|X_{0}=s_{i}\right.\right)>0\right\}$, where $gcd$ is the greatest common divisor. If $k_{i}=1$ the state $s_{i}$ is said to be aperiodic, else if $k_{i}>1$ the state $s_{i}$ is periodic with period $k_{i}$. Loosely speaking, $s_{i}$ is periodic if it can only return to itself after a fixed number of transitions $k_{i}>1$ (or multiple of $k_{i}$), else it is aperiodic. If states $s_{i}$ and $s_{j}$ belong to the same communicating class, then they have the same period $k_{i}$. As a consequence, each of the states of an irreducible DTMC share the same periodicity. This periodicity is also considered the DTMC periodicity. It is possible to classify states according to their periodicity. Let $T^{x\rightarrow x}$ is the number of periods to go back to state $x$ knowing that the chain starts in $x$. * A state $x$ is recurrent if $P(T^{x\rightarrow x}<+\infty)=1$ (equivalently $P(T^{x\rightarrow x}=+\infty)=0$). In addition: 1. A state $x$ is null recurrent if in addition $E(T^{x\rightarrow x})=+\infty$. 2. 
A state $x$ is positive recurrent if in addition $E(T^{x\rightarrow x})<+\infty$. 3. A state $x$ is absorbing if in addition $P(T^{x\rightarrow x}=1)=1$. * A state $x$ is transient if $P(T^{x\rightarrow x}<+\infty)<1$ (equivalently $P(T^{x\rightarrow x}=+\infty)>0$). It is possible to analyze the timing to reach a certain state. The first passage time (or hitting time) from state $s_{i}$ to state $s_{j}$ is the number $T_{ij}$ of steps taken by the chain until it arrives for the first time to state $s_{j}$, given that $X_{0} = s_{i}$. The probability distribution of $T_{ij}$ is defined by Equation \ref{eq:fpt1} \begin{equation} {h_{ij}}^{\left( n \right)} = Pr\left( {T_{ij} = n} \right) = Pr\left( X_n = s_j,X_{n - 1} \ne s_{j}, \ldots ,X_1 \ne s_j |X_0 = s_i \right) \label{eq:fpt1} \end{equation} and can be found recursively using Equation \ref{eq:ftp2}, given that ${h_{ij}}^{\left( n \right)} = p_{ij}$. \begin{equation} {h_{ij}}^{\left( n \right)} = \sum\limits_{k \in S - \left\{ s_{j} \right\}}^{} {{p_{ik}}{h_{kj}}^{\left( {n - 1} \right)}}. \label{eq:ftp2} \end{equation} A commonly used quantity related to $h$ is its average value, i.e. the \emph{mean first passage time} (also expected hitting time), namely $\bar h_{ij}= \sum_{n=1\dots\infty} n \,h_{ij}^{(n)}$. If in the definition of the first passage time we let $s_{i}=s_{j}$, we obtain the first recurrence time $T_{i}=\inf \{ n\geq1:X_{n}=s_{i}|X_{0}=s_{i} \}$. We could also ask ourselves which is the *mean recurrence time*, an average of the mean first recurrence times: \[ r_i = \sum_{k = 1}^{\infty} k \cdot P(T_i = k) \] Revisiting the definition of recurrence and transience: a state $s_{i}$ is said to be recurrent if it is visited infinitely often, i.e., $Pr(T_{i}<+\infty|X_{0}=s_{i})=1$. On the opposite, $s_{i}$ is called transient if there is a positive probability that the chain will never return to $s_{i}$, i.e., $Pr(T_{i}=+\infty|X_{0}=s_{i})>0$. 
Given a time homogeneous Markov chain with transition matrix \emph{P}, a stationary distribution \emph{z} is a stochastic row vector such that $z=z\cdot P$, where $0\leq z_{j}\leq 1 \: \forall j$ and $\sum_{j}z_{j}=1$. If a DTMC $\{X_{n}\}$ is irreducible and aperiodic, then it has a limit distribution and this distribution is stationary. As a consequence, if $P$ is the $k\times k$ transition matrix of the chain and $z=\left(z_{1},...,z_{k}\right)$ is the unique eigenvector of $P$ such that $\sum_{i=1}^{k}z_{i}=1$, then we get \begin{equation} \underset{n\rightarrow\infty}{lim}P^{n}=Z, \label{eq:limMc} \end{equation} where $Z$ is the matrix having all rows equal to $z$. The stationary distribution of $\{X_{n}\}$ is represented by $z$. A matrix $A$ is called primitive if all of its entries are strictly positive, and we write it $A > 0$. If the transition matrix $P$ for a DTMC has some primitive power, i.e. it exists $m > 0: P^m > 0$, then the DTMC is said to be regular. In fact being regular is equivalent to being irreducible and aperiodic. All regular DTMCs are irreducible. The counterpart is not true. Given two absorbing states $s_A$ (source) and $s_B$ (sink), the \emph{committor probability} $q_j^{(AB)}$ is the probability that a process starting in state $s_i$ is absorbed in state $s_B$ (rather than $s_A$) [@noe_constructing_2009]. It can be computed via \begin{equation} q_j^{(AB)} = \sum_{k \ni {A, B}} P_{jk}q_k^{(AB)} \quad \mbox{with} \quad q_A^{(AB)} = 0 \quad \mbox{and} \quad q_B^{(AB)} = 1 \end{equation} Note we can also define the hitting probability from $i$ to $j$ as the probability of ever reaching the state $j$ if our initial state is $i$: \begin{equation} h_{i,j} = Pr(T_{ij} < \infty) = \sum_{n = 0}^{\infty} h_{ij}^{(n)} \label{eq:hitting-probs} \end{equation} In a DTMC with finite set of states, we know that a transient state communicates at least with one recurrent state. 
If the chain starts in a transient element, once it hits a recurrent state, it is going to be caught in its recurrent state, and we cannot expect it would go back to the initial state. Given a transient state $i$ we can define the *absorption probability* to the recurrent state $j$ as the probability that the first recurrent state that the Markov chain visits (and therefore gets absorbed by its recurrent class) is $j$, $f^{*}_ij$. We can also define the *mean absorption time* as the mean number of steps the transient state $i$ would take until it hits any recurrent state, $b_i$. ## A short example Consider the following numerical example. Suppose we have a DTMC with a set of 3 possible states $S=\{s_{1}, s_{2}, s_{3}\}$. Let the transition matrix be: \begin{equation} P=\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]. \label{eq:trPropExEx1} \end{equation} In $P$, $p_{11}=0.5$ is the probability that $X_{1}=s_{1}$ given that we observed $X_{0}=s_{1}$ is 0.5, and so on.It is easy to see that the chain is irreducible since all the states communicate (it is made by one communicating class only). Suppose that the current state of the chain is $X_{0}=s_{2}$, i.e., $x^{(0)}=(0\:1\:0)$, then the probability distribution of states after 1 and 2 steps can be computed as shown in Equations \@ref(eq:trPropExEx2) and \@ref(eq:trPropExEx3). \begin{equation} x^{(1)}=\left(0\:1\:0\right)\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]=\left(0.15\:0.45\:0.4\right). \label{eq:trPropExEx2} \end{equation} \begin{equation} x^{(n)}=x^{(n-1)}P \to \left(0.15\:0.45\:0.4\right)\left[\begin{array}{ccc} 0.5 & 0.2 & 0.3\\ 0.15 & 0.45 & 0.4\\ 0.25 & 0.35 & 0.4 \end{array}\right]=\left(0.2425\:0.3725\:0.385\right). 
\label{eq:trPropExEx3} \end{equation} If we were interested in the probability of being in the state $s_{3}$ in the second step, then $Pr\left(X_{2}=s_{3}\left|X_{0}=s_{2}\right.\right)=0.385$. \newpage # The structure of the package {#sec:structure} ## Creating markovchain objects The package is loaded within the \proglang{R} command line as follows: ```{r, load, results='hide', message=FALSE} library("markovchain") ``` The `markovchain` and `markovchainList` S4 classes [@chambers] are defined within the \pkg{markovchain} package as displayed: ```{r, showClass, echo=FALSE} showClass("markovchain") showClass("markovchainList") ``` The first class has been designed to handle homogeneous Markov chain processes, while the latter (which is itself a list of `markovchain` objects) has been designed to handle semi-homogeneous Markov chains processes. Any element of `markovchain` class is comprised by following slots: 1. `states`: a character vector, listing the states for which transition probabilities are defined. 2. `byrow`: a logical element, indicating whether transition probabilities are shown by row or by column. 3. `transitionMatrix`: the probabilities of the transition matrix. 4. `name`: optional character element to name the DTMC. The `markovchainList` objects are defined by following slots: 1. `markovchains`: a list of `markovchain` objects. 2. `name`: optional character element to name the DTMC. 
The `markovchain` objects can be created either in a long way, as the following code shows ```{r mcInitLong} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") ``` or in a shorter way, displayed below ```{r mcInitShort} mcWeather <- new("markovchain", states = c("sunny", "cloudy", "rain"), transitionMatrix = matrix(data = c(0.70, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.45, 0.35), byrow = byRow, nrow = 3), name = "Weather") ``` When `new("markovchain")` is called alone, a default Markov chain is created. ```{r defaultMc} defaultMc <- new("markovchain") ``` The quicker way to create `markovchain` objects is made possible thanks to the implemented `initialize` S4 method that checks that: * the `transitionMatrix`, either of class matrix or Matrix, to be a transition matrix, i.e., all entries to be probabilities and either all rows or all columns to sum up to one. * the columns and rows names of `transitionMatrix` to be defined and to coincide with `states` vector slot. The `markovchain` objects can be collected in a list within `markovchainList` S4 objects as following example shows. ```{r intromcList} mcList <- new("markovchainList", markovchains = list(mcWeather, defaultMc), name = "A list of Markov chains") ``` ## Handling markovchain objects Table \@ref(tab:methodsToHandleMc) lists which methods handle and manipulate `markovchain` objects. 
\begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Purpose \\ \hline \hline \code{*} & Direct multiplication for transition matrices.\\ \code{\textasciicircum{}} & Compute the power \code{markovchain} of a given one.\\ \code{[} & Direct access to the elements of the transition matrix.\\ \code{==} & Equality operator between two transition matrices.\\ \code{!=} & Inequality operator between two transition matrices.\\ \code{as} & Operator to convert \code{markovchain} objects into \code{data.frame} and\\ & \code{table} object.\\ \code{dim} & Dimension of the transition matrix.\\ \code{names} & Equal to \code{states}.\\ \code{names<-} & Change the \code{states} name.\\ \code{name} & Get the name of \code{markovchain object}.\\ \code{name<-} & Change the name of \code{markovchain object}.\\ \code{plot} & \code{plot} method for \code{markovchain} objects.\\ \code{print} & \code{print} method for \code{markovchain} objects.\\ \code{show} & \code{show} method for \code{markovchain} objects.\\ \code{sort} & \code{sort} method for \code{markovchain} objects, in terms of their states.\\ \code{states} & Name of the transition states.\\ \code{t} & Transposition operator (which switches \code{byrow} `slot value and modifies \\ & the transition matrix coherently).\\ \hline \end{tabular} \caption{\pkg{markovchain} methods for handling \code{markovchain} objects.} \label{tab:methodsToHandleMc} \end{table} The examples that follow shows how operations on `markovchain` objects can be easily performed. For example, using the previously defined matrix we can find what is the probability distribution of expected weather states in two and seven days, given the actual state to be cloudy. ```{r operations} initialState <- c(0, 1, 0) after2Days <- initialState * (mcWeather * mcWeather) after7Days <- initialState * (mcWeather ^ 7) after2Days round(after7Days, 3) ``` A similar answer could have been obtained defining the vector of probabilities as a column vector. 
A column - defined probability matrix could be set up either creating a new matrix or transposing an existing `markovchain` object thanks to the `t` method. ```{r operations2} initialState <- c(0, 1, 0) after2Days <- (t(mcWeather) * t(mcWeather)) * initialState after7Days <- (t(mcWeather) ^ 7) * initialState after2Days round(after7Days, 3) ``` The initial state vector previously shown can not necessarily be a probability vector, as the code that follows shows: ```{r fval} fvals<-function(mchain,initialstate,n) { out<-data.frame() names(initialstate)<-names(mchain) for (i in 0:n) { iteration<-initialstate*mchain^(i) out<-rbind(out,iteration) } out<-cbind(out, i=seq(0,n)) out<-out[,c(4,1:3)] return(out) } fvals(mchain=mcWeather,initialstate=c(90,5,5),n=4) ``` Basic methods have been defined for `markovchain` objects to quickly get states and transition matrix dimension. ```{r otherMethods} states(mcWeather) names(mcWeather) dim(mcWeather) ``` Methods are available to set and get the name of `markovchain` object. ```{r otherMethods2} name(mcWeather) name(mcWeather) <- "New Name" name(mcWeather) ``` Also it is possible to alphabetically sort the transition matrix: ```{r sortMethod} markovchain:::sort(mcWeather) ``` A direct access to transition probabilities is provided both by `transitionProbability` method and `"["` method. ```{r transProb} transitionProbability(mcWeather, "cloudy", "rain") mcWeather[2,3] ``` The transition matrix of a `markovchain` object can be displayed using `print` or `show` methods (the latter being less verbose). Similarly, the underlying transition probability diagram can be plotted by the use of `plot` method (as shown in Figure \@ref(fig:mcPlot)) which is based on \pkg{igraph} package [@pkg:igraph]. `plot` method for `markovchain` objects is a wrapper of `plot.igraph` for `igraph` S4 objects defined within the \pkg{igraph} package. Additional parameters can be passed to `plot` function to control the network graph layout. 
There are also \pkg{diagram} and \pkg{DiagrammeR} ways available for plotting as shown in Figure \@ref(fig:mcPlotdiagram). The `plot` function also uses `communicatingClasses` function to separate out states of different communicating classes. All states that belong to one class have same color. ```{r printAndShow} print(mcWeather) show(mcWeather) ``` ```{r mcPlot, echo=FALSE, fig.cap="Weather example. Markov chain plot"} if (requireNamespace("igraph", quietly = TRUE)) { library(igraph) plot(mcWeather,layout = layout.fruchterman.reingold) } else { message("igraph unavailable") } ``` ```{r mcPlotdiagram, echo=FALSE, fig.cap="Weather example. Markov chain plot with diagram"} if (requireNamespace("diagram", quietly = TRUE)) { library(diagram) plot(mcWeather, package="diagram", box.size = 0.04) } else { message("diagram unavailable") } ``` Import and export from some specific classes is possible, as shown in Figure \@ref(fig:fromAndTo) and in the following code. ```{r exportImport1} mcDf <- as(mcWeather, "data.frame") mcNew <- as(mcDf, "markovchain") mcDf mcIgraph <- as(mcWeather, "igraph") ``` ```{r exportImport2} if (requireNamespace("msm", quietly = TRUE)) { require(msm) Q <- rbind ( c(0, 0.25, 0, 0.25), c(0.166, 0, 0.166, 0.166), c(0, 0.25, 0, 0.25), c(0, 0, 0, 0) ) cavmsm <- msm(state ~ years, subject = PTNUM, data = cav, qmatrix = Q, death = 4) msmMc <- as(cavmsm, "markovchain") msmMc } else { message("msm unavailable") } ``` from etm (now archived as of September 2020): ```{r exporImport3} if (requireNamespace("etm", quietly = TRUE)) { library(etm) data(sir.cont) sir.cont <- sir.cont[order(sir.cont$id, sir.cont$time), ] for (i in 2:nrow(sir.cont)) { if (sir.cont$id[i]==sir.cont$id[i-1]) { if (sir.cont$time[i]==sir.cont$time[i-1]) { sir.cont$time[i-1] <- sir.cont$time[i-1] - 0.5 } } } tra <- matrix(ncol=3,nrow=3,FALSE) tra[1, 2:3] <- TRUE tra[2, c(1, 3)] <- TRUE tr.prob <- etm::etm(sir.cont, c("0", "1", "2"), tra, "cens", 1) tr.prob etm2mc<-as(tr.prob, 
"markovchain") etm2mc } else { message("etm unavailable") } ``` ```{r fromAndTo, echo=FALSE, fig.cap="The markovchain methods for import and export"} library(igraph) importExportGraph<-graph.formula(dataframe++markovchain,markovchain-+igraph, markovchain++matrix,table-+markovchain,msm-+markovchain,etm-+markovchain, markovchain++sparseMatrix) plot(importExportGraph,main="Import - Export from and to markovchain objects") ``` Coerce from `matrix` method, as the code below shows, represents another approach to create a `markovchain` method starting from a given squared probability matrix. ```{r exportImport4} myMatr<-matrix(c(.1,.8,.1,.2,.6,.2,.3,.4,.3), byrow=TRUE, ncol=3) myMc<-as(myMatr, "markovchain") myMc ``` Semi-homogeneous Markov chains can be created with the aid of `markovchainList` object. The example that follows arises from health insurance, where the costs associated to patients in a Continuous Care Health Community (CCHC) are modeled by a semi-homogeneous Markov Chain, since the transition probabilities change by year. Methods explicitly written for `markovchainList` objects are: `print`, `show`, `dim` and `[`. 
```{r cchcMcList} stateNames = c("H", "I", "D") Q0 <- new("markovchain", states = stateNames, transitionMatrix =matrix(c(0.7, 0.2, 0.1,0.1, 0.6, 0.3,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t0") Q1 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.5, 0.3, 0.2,0, 0.4, 0.6,0, 0, 1), byrow = TRUE, nrow = 3), name = "state t1") Q2 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0.3, 0.2, 0.5,0, 0.2, 0.8,0, 0, 1), byrow = TRUE,nrow = 3), name = "state t2") Q3 <- new("markovchain", states = stateNames, transitionMatrix = matrix(c(0, 0, 1, 0, 0, 1, 0, 0, 1), byrow = TRUE, nrow = 3), name = "state t3") mcCCRC <- new("markovchainList",markovchains = list(Q0,Q1,Q2,Q3), name = "Continuous Care Health Community") print(mcCCRC) ``` It is possible to perform direct access to `markovchainList` elements, as well as to determine the number of `markovchain` objects by which a `markovchainList` object is composed. ```{r cchcMcList2} mcCCRC[[1]] dim(mcCCRC) ``` The `markovchain` package contains some data found in the literature related to DTMC models (see Section \@ref(sec:applications). Table \@ref(tab:datasets) lists datasets and tables included within the current release of the package. \begin{table}[h] \centering \begin{tabular}{p{0.2\textwidth}p{0.75\textwidth}} \hline Dataset & Description \\ \hline \hline \code{blanden} & Mobility across income quartiles, \cite{blandenEtAlii}.\\ \code{craigsendi} & CD4 cells, \cite{craigSendi}.\\ \code{kullback} & raw transition matrices for testing homogeneity, \cite{kullback1962tests}.\\ \code{preproglucacon} & Preproglucacon DNA basis, \cite{averyHenderson}.\\ \code{rain} & Alofi Island rains, \cite{averyHenderson}.\\ \code{holson} & Individual states trajectories.\\ \code{sales} & Sales of six beverages in Hong Kong \cite{ching2008higher}. 
\\ \hline \end{tabular} \caption{The \pkg{markovchain} \code{data.frame} and \code{table}.} \label{tab:datasets} \end{table} Finally, Table \@ref(tab:demos) lists the demos included in the demo directory of the package. \begin{table}[h] \centering \begin{tabular}{lll} \hline R Code File & Description \\ \hline \hline \code{bard.R} & Structural analysis of Markov chains from Bard PPT.\\ \code{examples.R} & Notable Markov chains, e.g., The Gambler Ruin chain.\\ \code{quickStart.R} & Generic examples.\\ \code{extractMatrices.R} & Generic examples.\\ \hline \end{tabular} \caption{The \pkg{markovchain} demos.} \label{tab:demos} \end{table} # Probability with markovchain objects {#sec:probability} The \pkg{markovchain} package contains functions to analyse DTMC from a probabilistic perspective. For example, the package provides methods to find stationary distributions and identifying absorbing and transient states. Many of these methods come from \proglang{MATLAB} listings that have been ported into \proglang{R}. For a full description of the underlying theory and algorithm the interested reader can overview the original \proglang{MATLAB} listings, \cite{renaldoMatlab} and \cite{montgomery}. Table \@ref(tab:methodsToStats) shows methods that can be applied on `markovchain` objects to perform probabilistic analysis. \begin{table}[h] \centering \begin{tabular}{lll} \hline Method & Returns \\ \hline \hline \code{absorbingStates} & the absorbing states of the transition matrix, if any.\\ \code{steadyStates} & the vector(s) of steady state(s) in matrix form. \\ \code{meanFirstPassageTime} & matrix or vector of mean first passage times. \\ \code{meanRecurrenceTime} & vector of mean number of steps to return to each recurrent state \\ \code{hittingProbabilities} & matrix of hitting probabilities for a Markov chain. 
\\ \code{meanAbsorptionTime} & expected number of steps for a transient state to be \\ & absorbed by any recurrent class \\ \code{absorptionProbabilities} & probabilities of transient states of being \\ & absorbed by each recurrent state \\ \code{committorAB} & committor probabilities \\ \code{communicatingClasses} & list of communicating classes. \\ \code{conditionalDistribution} & the conditional probability distribution of the subsequent state \\ & $s_{j}$, given actual state $s_{i}$. \\ \code{canonicForm} & the transition matrix into canonic form. \\ \code{is.accessible} & checks whether a state j is reachable from state i. \\ \code{is.irreducible} & checks whether a DTMC is irreducible. \\ \code{is.regular} & checks whether a DTMC is regular. \\ \code{period} & the period of an irreducible DTMC. \\ \code{recurrentClasses} & list of recurrent communicating classes. \\ \code{transientClasses} & list of transient communicating classes. \\ \code{recurrentStates} & the recurrent states of the transition matrix. \\ \code{transientStates} & the transient states of the transition matrix, if any. \\ \code{summary} & DTMC summary. \\ \hline \end{tabular} \caption{\pkg{markovchain} methods: statistical operations.} \label{tab:methodsToStats} \end{table} ## Conditional distributions The conditional distribution of weather states, given that current day's weather is sunny, is given by following code. ```{r conditionalDistr} conditionalDistribution(mcWeather, "sunny") ``` ## Stationary states A stationary (steady state, or equilibrium) vector is a probability vector such that Equation \ref{eq:steadystat2} holds \begin{equation} \begin{matrix} 0\leq \pi_j \leq 1\\ \sum_{j \in S} \pi_j = 1\\ \pi \cdot P = \pi \end{matrix} \label{eq:steadystat2} \end{equation} Steady states are associated to $P$ eigenvalues equal to one. 
We could be tempted to compute them solving the eigen values / vectors of the matrix and taking real parts (since if $u + iv$ is an eigen vector, for the matrix $P$, then $Re(u + iv) = u$ and $Im(u + iv) = v$ are eigen vectors) and normalizing by the vector sum. However, this carries some concerns: 1. If $u, v \in \mathbb{R}^n$ are linearly independent eigen vectors associated to $1$ eigen value, $u + iv$, $u + iu$ are also linearly independent eigen vectors, and their real parts coincide. Clearly if we took real parts, we would be losing an eigen vector, because we cannot know in advance if the underlying algorithm to compute the eigen vectors is going to output something similar to what we described. We should be agnostic to the underlying eigen vector computation algorithm. 2. Imagine the identity $P$ of dimensions $2 \times 2$. Its eigen vectors associated to the $1$ eigen value are $u = (1, 0)$ and $v = (0, 1)$. However, the underlying algorithm to compute eigen vectors could return $(1, -2)$ and $(-2, 1)$ instead, that are linear combinations of the aforementioned ones, and therefore eigen vectors. Normalizing by their sum, we would get: $(-1, 2)$ and $(2, -1)$, which obviously are not probability measures. Again, we should be agnostic to the underlying eigen computation algorithm. 3. Algorithms to compute eigen values / vectors are computationally expensive: they are iterative, and we cannot predict a fixed number of iterations for them. Moreover, each iteration takes $\mathcal{O}(m^2)$ or $\mathcal{O}(m^3)$ algorithmic complexity, with $m$ the number of states. We are going to use that every irreducible DTMC has a unique steady state, that is, if $M$ is the matrix for an irreducible DTMC (all states communicate with each other), then there exists a unique $v \in \mathbb{R}^m$ such that: \[ v \cdot M = v, \qquad \sum_{i = 1}^m v_i = 1 \] Also, we'll use that a steady state for a DTMC assigns $0$ to the transient states. 
The canonical form of a (by row) stochastic matrix looks like: \[ \left(\begin{array}{c|c|c|c|c} M_1 & 0 & 0 & \ldots & 0 \\ \hline 0 & M_2 & 0 & \ldots & 0 \\ \hline 0 & 0 & M_3 & \ldots & 0 \\ \hline \vdots & \vdots & \vdots & \ddots & \vdots \\ \hline A_1 & A_2 & A_3 & \ldots & R \end{array}\right) \] where $M_i$ corresponds to irreducible sub-chains, the blocks $A_i$ correspond to the transitions from transient states to each of the recurrent classes and $R$ are the transitions from the transient states to themselves. Also, we should note that a Markov chain has exactly the same number of steady states as recurrent classes. Therefore, we have coded the following algorithm ^[We would like to thank Prof. Christophe Dutang for his contributions to the development of this method. He coded a first improvement of the original `steadyStates` method and we could not have reached the current correctness without his previous work]: 1. Identify the recurrent classes $[C_1, \ldots, C_l]$ with \texttt{recurrentClasses} function. 2. Take each class $C_i$, compute the sub-matrix corresponding to it $M_i$. 3. Solve the system $v \cdot M_i = v, \, \sum_{j = 1}^{|C_i|} v_j = 1$ which has a unique solution, for each $i = 1, \ldots, l$. 4. Map each state $v_i$ to the original order in $P$ and assign a $0$ to the slots corresponding to transient states in the matrix. The result is returned in matrix form. ```{r steadyStates} steadyStates(mcWeather) ``` It is possible for a Markov chain to have more than one stationary distribution, as the gambler ruin example shows. 
```{r gamblerRuin} gamblerRuinMarkovChain <- function(moneyMax, prob = 0.5) { m <- markovchain:::zeros(moneyMax + 1) m[1,1] <- m[moneyMax + 1,moneyMax + 1] <- 1 states <- as.character(0:moneyMax) rownames(m) <- colnames(m) <- states for(i in 2:moneyMax){ m[i,i-1] <- 1 - prob m[i, i + 1] <- prob } new("markovchain", transitionMatrix = m, name = paste("Gambler ruin", moneyMax, "dim", sep = " ")) } mcGR4 <- gamblerRuinMarkovChain(moneyMax = 4, prob = 0.5) steadyStates(mcGR4) ``` ## Classification of states Absorbing states are determined by means of `absorbingStates` method. ```{r absorbingStates} absorbingStates(mcGR4) absorbingStates(mcWeather) ``` The key function in methods which need knowledge about communicating classes, recurrent states, transient states, is `.commclassKernel`, which is a modification of Tarjan's algorithm from \cite{Tarjan}. This `.commclassKernel` method gets a transition matrix of dimension $n$ and returns a list of two items: 1. `classes`, an matrix whose $(i, j)$ entry is `true` if $s_i$ and $s_j$ are in the same communicating class. 2. `closed`, a vector whose $i$ -th entry indicates whether the communicating class to which $i$ belongs is closed. These functions are used by two other internal functions on which the `summary` method for `markovchain` objects works. The example matrix used in \cite{renaldoMatlab} well exemplifies the purpose of the function. ```{r renaldoMatrix1} P <- markovchain:::zeros(10) P[1, c(1, 3)] <- 1/2; P[2, 2] <- 1/3; P[2,7] <- 2/3; P[3, 1] <- 1; P[4, 5] <- 1; P[5, c(4, 5, 9)] <- 1/3; P[6, 6] <- 1; P[7, 7] <- 1/4; P[7,9] <- 3/4; P[8, c(3, 4, 8, 10)] <- 1/4; P[9, 2] <- 1; P[10, c(2, 5, 10)] <- 1/3; rownames(P) <- letters[1:10] colnames(P) <- letters[1:10] probMc <- new("markovchain", transitionMatrix = P, name = "Probability MC") summary(probMc) ``` All states that pertain to a transient class are named "transient" and a specific method has been written to elicit them. 
```{r transientStates} transientStates(probMc) ``` `canonicForm` method that turns a Markov chain into its canonic form, reordering the states to have first the recurrent classes and then the transient states. ```{r probMc2Canonic} probMcCanonic <- canonicForm(probMc) probMc probMcCanonic ``` The function `is.accessible` permits to investigate whether a state $s_{j}$ is accessible from state $s_i$, that is whether the probability to eventually reach $s_j$ starting from $s_{i}$ is greater than zero. ```{r isAccessible} is.accessible(object = probMc, from = "a", to = "c") is.accessible(object = probMc, from = "g", to = "c") ``` In Section \@ref(sec:properties) we observed that, if a DTMC is irreducible, all its states share the same periodicity. Then, the `period` function returns the periodicity of the DTMC, provided that it is irreducible. The example that follows shows how to find if a DTMC is reducible or irreducible by means of the function `is.irreducible` and, in the latter case, the method `period` is used to compute the periodicity of the chain. ```{r periodicity} E <- matrix(0, nrow = 4, ncol = 4) E[1, 2] <- 1 E[2, 1] <- 1/3; E[2, 3] <- 2/3 E[3,2] <- 1/4; E[3, 4] <- 3/4 E[4, 3] <- 1 mcE <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = E, name = "E") is.irreducible(mcE) period(mcE) ``` The example Markov chain found in \proglang{Mathematica} web site \citep{mathematica9MarkovChain} has been used, and is plotted in Figure \@ref(fig:mcMathematics). 
```{r mathematica9Mc} mathematicaMatr <- markovchain:::zeros(5) mathematicaMatr[1,] <- c(0, 1/3, 0, 2/3, 0) mathematicaMatr[2,] <- c(1/2, 0, 0, 0, 1/2) mathematicaMatr[3,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[4,] <- c(0, 0, 1/2, 1/2, 0) mathematicaMatr[5,] <- c(0, 0, 0, 0, 1) statesNames <- letters[1:5] mathematicaMc <- new("markovchain", transitionMatrix = mathematicaMatr, name = "Mathematica MC", states = statesNames) ``` ```{r mcMathematics, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="Mathematica 9 example. Markov chain plot."} plot(mathematicaMc, layout = layout.fruchterman.reingold) ``` ```{r mathematica9MC, echo=FALSE} summary(mathematicaMc) ``` ## First passage time distributions and means \cite{renaldoMatlab} provides code to compute first passage time (within $1,2,\ldots, n$ steps) given the initial state to be $i$. The \proglang{MATLAB} listings translated into \proglang{R} on which the `firstPassage` function is based are: ```{r fpTime1, eval=FALSE} .firstpassageKernel <- function(P, i, n){ G <- P H <- P[i,] E <- 1 - diag(size(P)[2]) for (m in 2:n) { G <- P %*% (G * E) H <- rbind(H, G[i,]) } return(H) } ``` We conclude that the probability for the *first* rainy day to be the third one, given that the current state is sunny, is given by: ```{r fpTime2} firstPassagePdF <- firstPassage(object = mcWeather, state = "sunny", n = 10) firstPassagePdF[3, 3] ``` To compute the *mean* first passage times, i.e. the expected number of days before it rains given that today is sunny, we can use the `meanFirstPassageTime` function: ```{r mfpt1} meanFirstPassageTime(mcWeather) ``` indicating e.g. that the average number of days of sun or cloud before rain is 6.67 if we start counting from a sunny day, and 5 if we start from a cloudy day. Note that we can also specify one or more destination states: ```{r mfpt2} meanFirstPassageTime(mcWeather,"rain") ``` The implementation follows the matrix solutions by [@GrinsteadSnell]. 
We can check the result by averaging the first passage probability density function: ```{r mfpt3} firstPassagePdF.long <- firstPassage(object = mcWeather, state = "sunny", n = 100) sum(firstPassagePdF.long[,"rain"] * 1:100) ``` ## Mean recurrence time The `meanRecurrenceTime` method gives the first mean recurrence time (expected number of steps to go back to a state if it was the initial one) for each recurrent state in the transition probabilities matrix for a DTMC. Let's see an example: ```{r mrt-weather} meanRecurrenceTime(mcWeather) ``` Another example, with not all of its states being recurrent: ```{r mrt-probMc} recurrentStates(probMc) meanRecurrenceTime(probMc) ``` ## Absorption probabilities and mean absorption time We are going to use the Drunkard’s random walk from [@GrinsteadSnell]. We have a drunk person walking through the street. Each move the person does, if they have not arrived to either home (corner 1) or to the bar (corner 5) could be to the left corner or to the right one, with equal probability. In case of arrival to the bar or to home, the person stays there. ```{r data-drunkard} drunkProbs <- markovchain:::zeros(5) drunkProbs[1,1] <- drunkProbs[5,5] <- 1 drunkProbs[2,1] <- drunkProbs[2,3] <- 1/2 drunkProbs[3,2] <- drunkProbs[3,4] <- 1/2 drunkProbs[4,3] <- drunkProbs[4,5] <- 1/2 drunkMc <- new("markovchain", transitionMatrix = drunkProbs) drunkMc ``` Recurrent (in fact absorbing states) are: ```{r rs-drunkard} recurrentStates(drunkMc) ``` Transient states are the rest: ```{r ts-drunkard} transientStates(drunkMc) ``` The probability of either being absorbed by the bar or by the sofa at home are: ```{r ap-drunkard} absorptionProbabilities(drunkMc) ``` which means that the probability of arriving home / bar is inversely proportional to the distance to each one. 
But we also would like to know how much time does the person take to arrive there, which can be done with `meanAbsorptionTime`: ```{r at-drunkard} meanAbsorptionTime(drunkMc) ``` So it would take `3` steps to arrive to the destiny if the person is either in the second or fourth corner, and `4` steps in case of being at the same distance from home than to the bar. ## Committor probability The committor probability tells us the probability to reach a given state before another given. Suppose that we start in a cloudy day, the probabilities of experiencing a rainy day before a sunny one is 0.5: ```{r} committorAB(mcWeather,3,1) ``` ## Hitting probabilities Rewriting the system \eqref{eq:hitting-probs} as: \begin{equation*} A = \left(\begin{array}{c|c|c|c} A_1 & 0 & \ldots & 0 \\ \hline 0 & A_2 & \ldots & 0 \\ \hline \vdots & \vdots & \ddots & 0 \\ \hline 0 & 0 & \ldots & A_n \end{array}\right) \end{equation*} \begin{eqnarray*} A_1 &= \left(\begin{matrix} -1 & p_{1,2} & p_{1,3} & \ldots & p_{1,n} \\ 0 & (p_{2,2} - 1) & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 0 & p_{n, 2} & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ A_2 &= \left(\begin{matrix} (p_{1,1} - 1) & 0 & p_{1,3} & \ldots & p_{1,n} \\ p_{2,1} & -1 & p_{2,3} & \ldots & p_{2,n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & 0 & p_{n,3} & \ldots & (p_{n,n} - 1) \end{matrix}\right)\\ \vdots & \vdots\\ A_n &= \left(\begin{matrix} (p_{1,1} - 1) & p_{1,2} & p_{1,3} & \ldots & 0 \\ p_{2,1} & (p_{2,2} -1) & p_{2,3} & \ldots & 0 \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ p_{n,1} & p_{n,2} & p_{n,3} & \ldots & -1 \end{matrix}\right)\\ \end{eqnarray*} \begin{equation*} \begin{array}{lr} X_j = \left(\begin{array}{c} h_{1,j} \\ h_{2,j} \\ \vdots \\ h_{n,j} \end{array}\right) & C_j = - \left(\begin{array}{c} p_{1,j} \\ p_{2,j} \\ \vdots \\ p_{n,j} \end{array}\right) \end{array} \end{equation*} we end up having to solve the block systems: \begin{equation} A_j 
\cdot X_j = C_j \end{equation} Let us imagine the $i$ -th state has transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then that same row would turn into $(0,0, \ldots, 0)$ for some block, thus obtaining a singular matrix. Another case which may give us problems could be: state $i$ has the following transition probabilities: $(0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0)$ and the state $j$ has the following transition probabilities: $(0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0)$. Then when building some blocks we will end up with rows: \begin{eqnarray*} (0, \ldots, 0, \underset{i)}{-1}, 0, \ldots, 0, \underset{j)}{1}, 0, \ldots, 0) \\ (0, \ldots, 0, \underset{i)}{1}, 0, \ldots, 0, \underset{j)}{-1}, 0, \ldots, 0) \end{eqnarray*} which are linearly dependent. Our hypothesis is that if we treat the closed communicating classes differently, we *might* delete the linearity in the system. If we have a closed communicating class $C_u$, then $h_{i,j} = 1$ for all $i,j \in C_u$ and $h_{k,j} = 0$ for all $k\not\in C_u$. Then we can set $X_u$ appropriately and solve the other $X_v$ using those values. The method in charge of that in `markovchain` package is `hittingProbabilities`, which receives a Markov chain and computes the matrix $(h_{ij})_{i,j = 1,\ldots, n}$ where $S = \{s_1, \ldots, s_n\}$ is the set of all states of the chain. For the following chain: ```{r hitting-data} M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 hittingTest <- new("markovchain", transitionMatrix = M) hittingProbabilities(hittingTest) ``` we want to compute the hitting probabilities. That can be done with: ```{r hitting-probabilities} hittingProbabilities(hittingTest) ``` In the case of the `mcWeather` Markov chain we would obtain a matrix with all its elements set to $1$. 
That makes sense (and is desirable) since if today is sunny, we expect it would be sunny again at certain point in the time, and the same with rainy weather (that way we assure good harvests): ```{r hitting-weather} hittingProbabilities(mcWeather) ``` # Statistical analysis {#sec:statistics} Table \@ref(tab:funs4Stats) lists the functions and methods implemented within the package which help to fit, simulate and predict DTMC. \begin{table}[h] \centering \begin{tabular}{lll} \hline Function & Purpose \\ \hline \hline \code{markovchainFit} & Function to return fitted Markov chain for a given sequence.\\ \code{predict} & Method to calculate predictions from \code{markovchain} or \\ & \code{markovchainList} objects.\\ \code{rmarkovchain} & Function to sample from \code{markovchain} or \code{markovchainList} objects.\\ \hline \end{tabular} \caption{The \pkg{markovchain} statistical functions.} \label{tab:funs4Stats} \end{table} ## Simulation Simulating a random sequence from an underlying DTMC is quite easy thanks to the function `rmarkovchain`. The following code generates a year of weather states according to `mcWeather` underlying stochastic process. ```{r simulatingAMarkovChain} weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") weathersOfDays[1:30] ``` Similarly, it is possible to simulate one or more sequences from a semi-homogeneous Markov chain, as the following code (applied on CCHC example) exemplifies. ```{r simulatingAListOfMarkovChain} patientStates <- rmarkovchain(n = 5, object = mcCCRC, t0 = "H", include.t0 = TRUE) patientStates[1:10,] ``` Two advance parameters are available to the `rmarkovchain` method which helps you decide which implementation to use. There are four options available : \proglang{R}, \proglang{R} in parallel, \proglang{C++} and \proglang{C++} in parallel. Two boolean parameters `useRcpp` and `parallel` will decide which implementation will be used. Default is \code{useRcpp = TRUE} and \code{parallel = FALSE} i.e. 
\proglang{C++} implementation. The \proglang{C++} implementation is generally faster than the `R` implementation. If you have multicore processors then you can take advantage of `parallel` parameter by setting it to `TRUE`. When both `Rcpp=TRUE` and `parallel=TRUE` the parallelization has been carried out using \pkg{RcppParallel} package \citep{pkg:RcppParallel}. ## Estimation A time homogeneous Markov chain can be fit from given data. Four methods have been implemented within current version of \pkg{markovchain} package: maximum likelihood, maximum likelihood with Laplace smoothing, Bootstrap approach, maximum a posteriori. Equation \ref{eq:MLE} shows the maximum likelihood estimator (MLE) of the $p_{ij}$ entry, where the $n_{ij}$ element consists in the number of sequences $\left( X_{t}=s_{i}, X_{t+1}=s_{j}\right)$ found in the sample, that is \begin{equation} {\hat p^{MLE}}_{ij} = \frac{n_{ij}}{\sum\limits_{u = 1}^k {n_{iu}}}. \label{eq:MLE} \end{equation} Equation \@ref(eq:SE) shows the `standardError` of the MLE \citep{MSkuriat}. \begin{equation} SE_{ij} = \frac{ {\hat p^{MLE}}_{ij} }{\sqrt{n_{ij}}} \label{eq:SE} \end{equation} ```{r fitMcbyMLE2} weatherFittedMLE <- markovchainFit(data = weathersOfDays, method = "mle",name = "Weather MLE") weatherFittedMLE$estimate weatherFittedMLE$standardError ``` The Laplace smoothing approach is a variation of the MLE, where the $n_{ij}$ is substituted by $n_{ij}+\alpha$ (see Equation \ref{eq:LAPLACE}), being $\alpha$ an arbitrary positive stabilizing parameter. \begin{equation} {\hat p^{LS}}_{ij} = \frac{{{n_{ij}} + \alpha }}{{\sum\limits_{u = 1}^k {\left( {{n_{iu}} + \alpha } \right)} }} \label{eq:LAPLACE} \end{equation} ```{r fitMcbyLAPLACE} weatherFittedLAPLACE <- markovchainFit(data = weathersOfDays, method = "laplace", laplacian = 0.01, name = "Weather LAPLACE") weatherFittedLAPLACE$estimate ``` (NOTE: The Confidence Interval option is enabled by default. Remove this option to speed up computations.) 
Both MLE and Laplace approach are based on the `createSequenceMatrix` functions that returns the raw counts transition matrix. ```{r fitSequenceMatrix} createSequenceMatrix(stringchar = weathersOfDays) ``` `stringchar` could contain `NA` values, and the transitions containing `NA` would be ignored. An issue occurs when the sample contains only one realization of a state (say $X_{\beta}$) which is located at the end of the data sequence, since it yields to a row of zero (no sample to estimate the conditional distribution of the transition). In this case the estimated transition matrix is corrected assuming $p_{\beta,j}=1/k$, being $k$ the possible states. Create sequence matrix can also be used to obtain raw count transition matrices from a given $n*2$ matrix as the following example shows: ```{r fitSequenceMatrix2} myMatr<-matrix(c("a","b","b","a","a","b","b","b","b","a","a","a","b","a"),ncol=2) createSequenceMatrix(stringchar = myMatr,toRowProbs = TRUE) ``` A bootstrap estimation approach has been developed within the package in order to provide an indication of the variability of ${\hat p}_{ij}$ estimates. The bootstrap approach implemented within the \pkg{markovchain} package follows these steps: 1. bootstrap the data sequences following the conditional distributions of states estimated from the original one. The default bootstrap samples is 10, as specified in `nboot` parameter of `markovchainFit` function. 2. apply MLE estimation on bootstrapped data sequences that are saved in `bootStrapSamples` slot of the returned list. 3. the ${p^{BOOTSTRAP}}_{ij}$ is the average of all ${p^{MLE}}_{ij}$ across the `bootStrapSamples` list, normalized by row. A `standardError` of $\hat{{p^{MLE}}_{ij}}$ estimate is provided as well. 
```{r fitMcbyBootStrap1} weatherFittedBOOT <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 20) weatherFittedBOOT$estimate weatherFittedBOOT$standardError ``` The bootstrapping process can be done in parallel thanks to \pkg{RcppParallel} package \citep{pkg:RcppParallel}. Parallelized implementation is definitely suggested when the data sample size or the required number of bootstrap runs is high. ```{r fitMcbyBootStrap2, eval=FALSE} weatherFittedBOOTParallel <- markovchainFit(data = weathersOfDays, method = "bootstrap", nboot = 200, parallel = TRUE) weatherFittedBOOTParallel$estimate weatherFittedBOOTParallel$standardError ``` The parallel bootstrapping uses all the available cores on a machine by default. However, it is also possible to tune the number of threads used. Note that this should be done in R before calling the `markovchainFit` function. For example, the following code will set the number of threads to 2. ```{r fitMcbyBootStrap3, eval=FALSE} RcppParallel::setNumThreads(2) ``` For more details, please refer to \pkg{RcppParallel} web site. For all the fitting methods, the `logLikelihood` \citep{MSkuriat} denoted in Equation \ref{eq:LLH} is provided. \begin{equation} LLH = \sum_{i,j} n_{ij} * log (p_{ij}) \label{eq:LLH} \end{equation} where $n_{ij}$ is the entry of the frequency matrix and $p_{ij}$ is the entry of the transition probability matrix. ```{r fitMcbyMLE1} weatherFittedMLE$logLikelihood weatherFittedBOOT$logLikelihood ``` Confidence matrices of estimated parameters (parametric for MLE, non - parametric for BootStrap) are available as well. The `confidenceInterval` is provided with the two matrices: `lowerEndpointMatrix` and `upperEndpointMatrix`. The confidence level (CL) is 0.95 by default and can be given as an argument of the function `markovchainFit`. This is used to obtain the standard score (z-score). 
From classical inference theory, if $ci$ is the level of confidence required, assuming normal distribution, the $zscore(ci)$ solves $\Phi \left ( zscore(ci) \right ) = 1-\left(\frac{1-ci}{2}\right)$. Equations \ref{eq:CIL} and \ref{eq:CIU} \citep{MSkuriat} show the `confidenceInterval` of a fitting. Note that each entry of the matrices is bounded between 0 and 1. \begin{align} LowerEndpoint_{ij} = p_{ij} - zscore (CL) * SE_{ij} \label{eq:CIL} \\ UpperEndpoint_{ij} = p_{ij} + zscore (CL) * SE_{ij} \label{eq:CIU} \end{align} ```{r confint} weatherFittedMLE$confidenceInterval weatherFittedBOOT$confidenceInterval ``` A special function, `multinomialConfidenceIntervals`, has been written in order to obtain multinomial wise confidence intervals. The code has been based on an Rcpp translation of the \pkg{MultinomialCI} package's functions \cite{pkg:MultinomialCI} that were themselves based on the \cite{sison1995simultaneous} paper. ```{r multinomial} multinomialConfidenceIntervals(transitionMatrix = weatherFittedMLE$estimate@transitionMatrix, countsTransitionMatrix = createSequenceMatrix(weathersOfDays)) ``` The functions for fitting DTMC have mostly been rewritten in \proglang{C++} using \pkg{Rcpp} \cite{RcppR} since version 0.2. It is also possible to fit a DTMC object from `matrix` or `data.frame` objects as shown in following code. ```{r fitMclists} data(holson) singleMc<-markovchainFit(data=holson[,2:12],name="holson") ``` The same applies for `markovchainList` (output length has been limited). ```{r fitMclistsFit1, output.lines=20} mcListFit<-markovchainListFit(data=holson[,2:6],name="holson") mcListFit$estimate ``` Finally, given a `list` object, it is possible to fit a `markovchain` object or to obtain the raw transition matrix. 
```{r fitMclistsFit2} c1<-c("a","b","a","a","c","c","a") c2<-c("b") c3<-c("c","a","a","c") c4<-c("b","a","b","a","a","c","b") c5<-c("a","a","c",NA) c6<-c("b","c","b","c","a") mylist<-list(c1,c2,c3,c4,c5,c6) mylistMc<-markovchainFit(data=mylist) mylistMc ``` The same works for `markovchainListFit`. ```{r fitAMarkovChainListfromAlist, output.lines=15} markovchainListFit(data=mylist) ``` If any transition contains `NA`, it will be ignored in the results as the above example showed. ## Prediction The $n$-step forward predictions can be obtained using the `predict` methods explicitly written for `markovchain` and `markovchainList` objects. The prediction is the mode of the conditional distribution of $X_{t+1}$ given $X_{t}=s_{j}$, being $s_{j}$ the last realization of the DTMC (homogeneous or semi-homogeneous). ### Predicting from a markovchain object The 3-days forward predictions from a `markovchain` object can be generated as follows, assuming that the last two days were respectively "cloudy" and "sunny". ```{r markovchainPredict} predict(object = weatherFittedMLE$estimate, newdata = c("cloudy", "sunny"), n.ahead = 3) ``` ### Predicting from a markovchainList object Given an initial two years health status, the 5-year ahead prediction of any CCRC guest is ```{r markovchainListPredict} predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5) ``` The prediction has stopped at the end of the time sequence since the underlying semi-homogeneous Markov chain has a length of four. In order to continue five years ahead, the `continue=TRUE` parameter setting makes the `predict` method keep using the last `markovchain` in the sequence list. 
```{r markovchainListPredict2} predict(mcCCRC, newdata = c("H", "H"), n.ahead = 5, continue = TRUE) ``` ## Statistical Tests In this section, we describe the statistical tests: assessing the Markov property (`verifyMarkovProperty`), the order (`assessOrder`), the stationarity (`assessStationarity`) of a Markov chain sequence, and the divergence test for empirically estimated transition matrices (`divergenceTest`). Most of such tests are based on the $\chi ^2$ statistics. Relevant references are \cite{kullback1962tests} and \cite{anderson1957statistical}. All such tests have been designed for small samples, since it is easy to detect departures from the Markov property as long as the sample size increases. In addition, the accuracy of the statistical inference functions has been questioned and will be thoroughly investigated in future versions of the package. ### Assessing the Markov property of a Markov chain sequence The `verifyMarkovProperty` function verifies whether the Markov property holds for the given chain. The test implemented in the package looks at triplets of successive observations. If $x_1, x_2, \ldots, x_N$ is a set of observations and $n_{ijk}$ is the number of times $t$ $\left(1 \le t \le N-2 \right)$ such that $x_t=i, x_{t+1}=j, x_{t+2}=k$, then if the Markov property holds $n_{ijk}$ follows a Binomial distribution with parameters $n_{ij}$ and $p_{jk}$. A classical $\chi^2$ test can check this distributional assumption, since $\sum_{i}\sum_{j}\sum_{k}\frac{(n_{ijk}-n_{ij}\hat{p_{jk}})^2}{n_{ij}\hat{p_{jk}}}\sim \chi^2\left(q \right )$ where q is the number of degrees of freedom. 
The number of degrees of freedom q of the distribution of $\chi^2$ is given by the formula r-q+s-1, where: s denotes the number of states i in the state space such that n_{i} > 0 q denotes the number of pairs (i, j) for which n_{ij} > 0 and r denotes the number of triplets (i, j, k) for which n_{ij}n_{jk} > 0 ```{r test1} sample_sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") verifyMarkovProperty(sample_sequence) ``` ### Assessing the order of a Markov chain sequence The `assessOrder` function checks whether the given chain is of first order or of second order. For each possible present state, we construct a contingency table of the frequency of the future state for each past to present state transition as shown in Table \ref{tab:order}. \begin{table}[h] \centering \begin{tabular}{l | l | l | l} \hline past & present & future & future \\ & & a & b \\ \hline \hline a & a & 2 & 2\\ b & a & 2 & 2\\ \hline \end{tabular} \caption{Contingency table to assess the order for the present state a.} \label{tab:order} \end{table} Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is of first order. ```{r test2} data(rain) assessOrder(rain$rain) ``` ### Assessing the stationarity of a Markov chain sequence The `assessStationarity` function assesses if the transition probabilities of the given chain change over time. To be more specific, the chain is stationary if the following condition meets. \begin{equation} p_{ij}(t) = p_{ij} ~\textrm{ for all }~t \label{eq:stationarity} \end{equation} For each possible state, we construct a contingency table of the estimated transition probabilities over time as shown in Table \ref{tab:stationarity}. 
\begin{table}[h] \centering \begin{tabular}{l | l | l} \hline time (t) & probability of transition to a & probability of transition to b \\ \hline \hline 1 & 0 & 1\\ 2 & 0 & 1\\ . & . & . \\ . & . & . \\ . & . & . \\ 16 & 0.44 & 0.56\\ \hline \end{tabular} \caption{Contingency table to assess the stationarity of the state a.} \label{tab:stationarity} \end{table} Using the table, the function performs the $\chi ^2$ test by calling the `chisq.test` function. This test returns a list of the chi-squared value and the p-value. If the p-value is greater than the given significance level, we cannot reject the hypothesis that the sequence is stationary. ```{r test3} assessStationarity(rain$rain, 10) ``` ### Divergence tests for empirically estimated transition matrices This section discusses tests developed to verify whether: 1. An empirical transition matrix is consistent with a theoretical one. 2. Two or more empirical transition matrices belongs to the same DTMC. The first test is implemented by the `verifyEmpiricalToTheoretical` function. Being $f_{ij}$ the raw transition count, \cite{kullback1962tests} shows that $2*\sum_{i=1}^{r}\sum_{j=1}^{r}f_{ij}\ln\frac{f_{ij}}{f_{i.}P\left( E_j | E_i\right)} \sim \chi^2\left ( r*(r-1) \right )$. The following example is taken from \cite{kullback1962tests}: ```{r divergence1} sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) ``` The second one is implemented by the `verifyHomogeneity` function, inspired by \cite[section~9]{kullback1962tests}. 
Assuming that $i=1,2, \ldots, s$ DTMC samples are available and that the cardinality of the state space is $r$ it verifies whether the $s$ chains belongs to the same unknown one. \cite{kullback1962tests} shows that its test statistics follows a chi-square law, $2*\sum_{i=1}^{s}\sum_{j=1}^{r}\sum_{k=1}^{r}f_{ijk}\ln\frac{n*f_{ijk}}{f_{i..}f_{.jk}} \sim \chi^2\left ( r*(r-1) \right )$. Also the following example is taken from \cite{kullback1962tests}: ```{r divergence2} data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) ``` ## Continuous Times Markov Chains ### Intro The \pkg{markovchain} package provides functionality for continuous time Markov chains (CTMCs). CTMCs are a generalization of discrete time Markov chains (DTMCs) in that we allow time to be continuous. We assume a finite state space $S$ (for an infinite state space wouldn't fit in memory). We can think of CTMCs as Markov chains in which state transitions can happen at any time. More formally, we would like our CTMCs to satisfy the following two properties: * The Markov property - let $F_{X(s)}$ denote the information about $X$ up to time $s$. Let $j \in S$ and $s \leq t$. Then, $P(X(t) = j|F_{X(s)}) = P(X(t) = j|X(s))$. * Time homogeneity - $P(X(t) = j|X(s) = k) = P(X(t-s) = j|X(0) = k)$. If both the above properties are satisfied, it is referred to as a time-homogeneous CTMC. If a transition occurs at time $t$, then $X(t)$ denotes the new state and $X(t)\neq X(t-)$. Now, let $X(0)=x$ and let $T_x$ be the time a transition occurs from this state. We are interested in the distribution of $T_x$. For $s,t \geq 0$, it can be shown that $ P(T_x > s+t | T_x > s) = P(T_x > t) $ This is the memory less property that only the exponential random variable exhibits. Therefore, this is the sought distribution, and each state $s \in S$ has an exponential holding parameter $\lambda(s)$. 
Since $\mathrm{E}T_x = \frac{1}{\lambda(x)}$, higher the rate $\lambda(x)$, smaller the expected time of transitioning out of the state $x$. However, specifying this parameter alone for each state would only paint an incomplete picture of our CTMC. To see why, consider a state $x$ that may transition to either state $y$ or $z$. The holding parameter enables us to predict when a transition may occur if we start off in state $x$, but tells us nothing about which state will be next. To this end, we also need transition probabilities associated with the process, defined as follows (for $y \neq x$) - $p_{xy} = P(X(T_s) = y | X(0) = x)$. Note that $\sum_{y \neq x} p_{xy} = 1$. Let $Q$ denote this transition matrix ($Q_{ij} = p_{ij}$). What is key here is that $T_x$ and the state $y$ are independent random variables. Let's define $\lambda(x, y) = \lambda(x) p_{xy}$ We now look at Kolmogorov's backward equation. Let's define $P_{ij}(t) = P(X(t) = j | X(0) = i)$ for $i, j \in S$. The backward equation is given by (it can be proved) $P_{ij}(t) = \delta_{ij}e^{-\lambda(i)t} + \int_{0}^{t}\lambda(i)e^{-\lambda(i)t} \sum_{k \neq i} Q_{ik} P_{kj}(t-s) ds$. Basically, the first term is non-zero if and only if $i=j$ and represents the probability that the first transition from state $i$ occurs after time $t$. This would mean that at $t$, the state is still $i$. The second term accounts for any transitions that may occur before time $t$ and denotes the probability that at time $t$, when the smoke clears, we are in state $j$. This equation can be represented compactly as follows $P'(t) = AP(t)$ where $A$ is the *generator* matrix. \[ A(i, j) = \begin{cases} \lambda(i, j) & \mbox{if } i \neq j \\ -\lambda(i) & \mbox{else.} \end{cases} \] Observe that the sum of each row is 0. A CTMC can be completely specified by the generator matrix. ### Stationary Distributions The following theorem guarantees the existence of a unique stationary distribution for CTMCs. 
Note that $X(t)$ being irreducible and recurrent is the same as $X_n(t)$ being irreducible and recurrent. Suppose that $X(t)$ is irreducible and recurrent. Then $X(t)$ has an invariant measure $\eta$, which is unique up to multiplicative factors. Moreover, for each $k \in S$, we have \[\eta_k = \frac{\pi_k}{\lambda(k)}\] where $\pi$ is the unique invariant measure of the embedded discrete time Markov chain $Xn$. Finally, $\eta$ satisfies \[0 < \eta_j < \infty, \forall j \in S\] and if $\sum_i \eta_i < \infty$ then $\eta$ can be normalized to get a stationary distribution. ### Estimation Let the data set be $D = \{(s_0, t_0), (s_1, t_1), ..., (s_{N-1}, t_{N-1})\}$ where $N=|D|$. Each $s_i$ is a state from the state space $S$ and during the time $[t_i,t_{i+1}]$ the chain is in state $s_i$. Let the parameters be represented by $\theta = \{\lambda, P\}$ where $\lambda$ is the vector of holding parameters for each state and $P$ the transition matrix of the embedded discrete time Markov chain. Then the probability is given by \[ {Pr(D | \theta) \propto \lambda(s_0)e^{-\lambda(s_0)(t_1-t_0)}Pr(s_1|s_0) \cdot\ldots\cdot \lambda(s_{N-2})e^{-\lambda(s_{N-2})(t_{N-1}-t_{N-2})}Pr(s_{N-1}|s_{N-2})} \] Let $n(j|i)$ denote the number of $i$->$j$ transitions in $D$, and $n(i)$ the number of times $s_i$ occurs in $D$. Let $t(s_i)$ denote the total time the chain spends in state $s_i$. Then the MLEs are given by \[ \hat{\lambda(s)} = \frac{n(s)}{t(s)},\hat{Pr(j|i)}=\frac{n(j|i)}{n(i)} \] ### Expected Hitting Time The package provides a function `ExpectedTime` to calculate average hitting time from one state to another. Let the final state be j, then for every state $i \in S$, where $S$ is the set of all states and holding time $q_{i} > 0$ for every $i \neq j$. 
Assuming the conditions to be true, expected hitting time is equal to minimal non-negative solution vector $p$ to the system of linear equations: \begin{equation} \begin{cases} p_{k} = 0 & k = j \\ -\sum_{l \in I} q_{kl}p_{k} = 1 & k \neq j \end{cases} \label{eq:EHT} \end{equation} ### Probability at time t The package provides a function `probabilityatT` to calculate probability of every state according to given `ctmc` object. Here we use Kolmogorov's backward equation $P(t) = P(0)e^{tQ}$ for $t \geq 0$ and $P(0) = I$. Here $P(t)$ is the transition function at time t. The value $P(t)[i][j]$ at time $P(t)$ describes the probability of the state at time $t$ to be equal to j if it was equal to i at time $t=0$. It takes care of the case when `ctmc` object has a generator represented by columns. If initial state is not provided, the function returns the whole transition matrix $P(t)$. ### Examples To create a CTMC object, you need to provide a valid generator matrix, say $Q$. The CTMC object has the following slots - states, generator, by row, name (look at the documentation object for further details). Consider the following example in which we aim to model the transition of a molecule from the $\sigma$ state to the $\sigma^*$ state. When in the former state, if it absorbs sufficient energy, it can make the jump to the latter state and remains there for some time before transitioning back to the original state. Let us model this by a CTMC: ```{r rCtmcInit} energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") ``` To generate random CTMC transitions, we provide an initial distribution of the states. This must be in the same order as the dimnames of the generator. The output can be returned either as a list or a data frame. 
```{r rctmcRandom0} statesDist <- c(0.8, 0.2) rctmc(n = 3, ctmc = molecularCTMC, initDist = statesDist, out.type = "df", include.T0 = FALSE) ``` $n$ represents the number of samples to generate. There is an optional argument $T$ for `rctmc`. It represents the time of termination of the simulation. To use this feature, set $n$ to a very high value, say `Inf` (since we do not know the number of transitions before hand) and set $T$ accordingly. ```{r ctmcRandom1} statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, initDist = statesDist, T = 2) ``` To obtain the stationary distribution simply invoke the `steadyStates` function ```{r rctmcSteadyStates} steadyStates(molecularCTMC) ``` For fitting, use the `ctmcFit` function. It returns the MLE values for the parameters along with the confidence intervals. ```{r rctmcFitting} data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) ``` One approach to obtain the generator matrix is to apply the `logm` function from the \pkg{expm} package on a transition matrix. Numeric issues arise, see \cite{israel2001finding}. For example, applying the standard `method` ('Higham08') on `mcWeather` raises an error, whilst the alternative method (eigenvalue decomposition) is OK. The following code estimates the generator matrix of the `mcWeather` transition matrix. ```{r mcWeatherQ} mcWeatherQ <- expm::logm(mcWeather@transitionMatrix,method='Eigen') mcWeatherQ ``` Therefore, the "half - day" transition probability for mcWeather DTMC is ```{r mcWeatherHalfDay} mcWeatherHalfDayTM <- expm::expm(mcWeatherQ*.5) mcWeatherHalfDay <- new("markovchain",transitionMatrix=mcWeatherHalfDayTM,name="Half Day Weather Transition Matrix") mcWeatherHalfDay ``` The \pkg{ctmcd} package \citep{pkg:ctmcd} provides various functions to estimate the generator matrix (GM) of a CTMC process using different methods. 
The following code provides a way to join \pkg{markovchain} and \pkg{ctmcd} computations. ```{r ctmcd1} if(requireNamespace(package='ctmcd', quietly = TRUE)) { require(ctmcd) require(expm) #defines a function to transform a GM into a TM gm_to_markovchain<-function(object, t=1) { if(!(class(object) %in% c("gm","matrix","Matrix"))) stop("Error! Expecting either a matrix or a gm object") if ( class(object) %in% c("matrix","Matrix")) generator_matrix<-object else generator_matrix<-as.matrix(object[["par"]]) #must add importClassesFrom("markovchain",markovchain) in the NAMESPACE #must add importFrom(expm, "expm") transitionMatrix<-expm(generator_matrix*t) out<-as(transitionMatrix,"markovchain") return(out) } #loading ctmcd dataset data(tm_abs) gm0=matrix(1,8,8) #initializing diag(gm0)=0 diag(gm0)=-rowSums(gm0) gm0[8,]=0 gmem=gm(tm_abs,te=1,method="EM",gmguess=gm0) #estimating GM mc_at_2=gm_to_markovchain(object=gmem, t=2) #converting to TM at time 2 } else { warning('package ctmcd unavailable') } ``` ## Pseudo - Bayesian Estimation \cite{Hu2002} shows an empirical quasi-Bayesian method to estimate transition matrices, given an empirical $\hat{P}$ transition matrix (estimated using the classical approach) and an a - priori estimate $Q$. In particular, each row of the matrix is estimated using the linear combination $\alpha \cdot Q+\left(1-\alpha\right) \cdot \hat{P}$, where $\alpha$ is defined for each row as Equation \ref{eq:pseudobayes} shows \begin{equation} \left\{\begin{matrix} \hat{\alpha_i}=\frac{\hat{K_i}}{v\left(i \right )+\hat{K_i}}\\ \hat{K_i}=\frac{v\left(i \right)^2 - \sum_{j}Y_{ij}^2}{\sum_{j}(Y_{ij}-v\left(i \right)*q_{ij})^2} \end{matrix}\right. 
\label{eq:pseudobayes} \end{equation} The following code returns the pseudo Bayesian estimate of the transition matrix: ```{r pseudobayes} pseudoBayesEstimator <- function(raw, apriori){ v_i <- rowSums(raw) K_i <- numeric(nrow(raw)) sumSquaredY <- rowSums(raw^2) #get numerator K_i_num <- v_i^2-sumSquaredY #get denominator VQ <- matrix(0,nrow= nrow(apriori),ncol=ncol(apriori)) for (i in 1:nrow(VQ)) { VQ[i,]<-v_i[i]*apriori[i,] } K_i_den<-rowSums((raw - VQ)^2) K_i <- K_i_num/K_i_den #get the alpha vector alpha <- K_i / (v_i+K_i) #empirical transition matrix Emp<-raw/rowSums(raw) #get the estimate out<-matrix(0, nrow= nrow(raw),ncol=ncol(raw)) for (i in 1:nrow(out)) { out[i,]<-alpha[i]*apriori[i,]+(1-alpha[i])*Emp[i,] } return(out) } ``` We then apply it to the weather example: ```{r pseudobayes2} trueMc<-as(matrix(c(0.1, .9,.7,.3),nrow = 2, byrow = 2),"markovchain") aprioriMc<-as(matrix(c(0.5, .5,.5,.5),nrow = 2, byrow = 2),"markovchain") smallSample<-rmarkovchain(n=20,object = trueMc) smallSampleRawTransitions<-createSequenceMatrix(stringchar = smallSample) pseudoBayesEstimator( raw = smallSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix biggerSample<-rmarkovchain(n=100,object = trueMc) biggerSampleRawTransitions<-createSequenceMatrix(stringchar = biggerSample) pseudoBayesEstimator( raw = biggerSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix bigSample<-rmarkovchain(n=1000,object = trueMc) bigSampleRawTransitions<-createSequenceMatrix(stringchar = bigSample) pseudoBayesEstimator( raw = bigSampleRawTransitions, apriori = aprioriMc@transitionMatrix ) - trueMc@transitionMatrix ``` ## Bayesian Estimation The \pkg{markovchain} package provides functionality for maximum a posteriori (MAP) estimation of the chain parameters (at the time of writing this document, only first order models are supported) by Bayesian inference. 
It also computes the probability of observing a new data set, given a (different) data set. This vignette provides the mathematical description for the methods employed by the package. ### Notation and set-up The data is denoted by $D$, the model parameters (transition matrix) by $\theta$. The object of interest is $P(\theta | D)$ (posterior density). $\mathcal{A}$ represents an alphabet class, each of whose members represent a state of the chain. Therefore \[D = s_0 s_1 ... s_{N-1}, s_t \in \mathcal{A}\] where $N$ is the length of the data set. Also, \[\theta = \{p(s|u), s \in \mathcal{A}, u \in \mathcal{A} \}\] where $\sum_{s \in \mathcal{A}} p(s|u) = 1$ for each $u \in \mathcal{A}$. Our objective is to find $\theta$ which maximizes the posterior. That is, if our solution is denoted by $\hat{\theta}$, then \[\hat{\theta} = \underset{\theta}{argmax}P(\theta | D)\] where the search space is the set of right stochastic matrices of dimension $|\mathcal{A}|x|\mathcal{A}|$. $n(u, s)$ denotes the number of times the word $us$ occurs in $D$ and $n(u)=\sum_{s \in \mathcal{A}}n(u, s)$. The hyper-parameters are similarly denoted by $\alpha(u, s)$ and $\alpha(u)$ respectively. ### Methods Given $D$, its likelihood conditioned on the observed initial state in D is given by \[P(D|\theta) = \prod_{s \in \mathcal{A}} \prod_{u \in \mathcal{A}} p(s|u)^{n(u, s)}\] Conjugate priors are used to model the prior $P(\theta)$. The reasons are two fold: 1. Exact expressions can be derived for the MAP estimates, expectations and even variances 2. Model order selection/comparison can be implemented easily (available in a future release of the package) The hyper-parameters determine the form of the prior distribution, which is a product of Dirichlet distributions \[P(\theta) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{\alpha(u, s)) - 1} \Big\}\] where $\Gamma(.)$ is the Gamma function. 
The hyper-parameters are specified using the `hyperparam` argument in the `markovchainFit` function. If this argument is not specified, then a default value of 1 is assigned to each hyper-parameter resulting in the prior distribution of each chain parameter to be uniform over $[0,1]$. Given the likelihood and the prior as described above, the evidence $P(D)$ is simply given by \[P(D) = \int P(D|\theta) P(\theta) d\theta\] which simplifies to \[ P(D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u))} \Big\} \] Using Bayes' theorem, the posterior now becomes (thanks to the choice of conjugate priors) \[ P(\theta | D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(n(u) + \alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + \alpha(u, s))} \prod_{s \in \mathcal{A}} p(s|u)^{n(u, s) + \alpha(u, s)) - 1} \Big\} \] Since this is again a product of Dirichlet distributions, the marginal distribution of a particular parameter $P(s|u)$ of our chain is given by \[ P(s|u) \sim Beta(n(u, s) + \alpha(u, s), n(u) + \alpha(u) - n(u, s) - \alpha(u, s)) \] Thus, the MAP estimate $\hat{\theta}$ is given by \[ \hat{\theta} = \Big\{ \frac{n(u, s) + \alpha(u, s) - 1}{n(u) + \alpha(u) - |\mathcal{A}|}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \] The function also returns the expected value, given by \[ \text{E}_{\text{post}} p(s|u) = \Big\{ \frac{n(u, s) + \alpha(u, s)}{n(u) + \alpha(u)}, s \in \mathcal{A}, u \in \mathcal{A} \Big\} \] The variance is given by \[ \text{Var}_{\text{post}} p(s|u) = \frac{n(u, s) + \alpha(u, s)}{(n(u) + \alpha(u))^2} \frac{n(u) + \alpha(u) - n(u, s) - \alpha(u, s)}{n(u) + \alpha(u) + 1} \] The square root of this quantity is the standard error, which is returned by the function. The confidence intervals are constructed by computing the inverse of the beta integral. 
### Predictive distribution Given the old data set, the probability of observing new data is $P(D'|D)$ where $D'$ is the new data set. Let $m(u, s), m(u)$ denote the corresponding counts for the new data. Then, \[ P(D'|D) = \int P(D' | \theta) P(\theta | D) d\theta \] We already know the expressions for both quantities in the integral and it turns out to be similar to evaluating the evidence \[ P(D'|D) = \prod_{u \in \mathcal{A}} \Big\{ \frac{\Gamma(\alpha(u))}{\prod_{s \in \mathcal{A}} \Gamma(\alpha(u, s))} \frac{\prod_{s \in \mathcal{A}} \Gamma(n(u, s) + m(u, s) + \alpha(u, s))}{\Gamma(\alpha(u) + n(u) + m(u))} \Big\} \] ### Choosing the hyper-parameters The hyper parameters model the shape of the parameters' prior distribution. These must be provided by the user. The package offers functionality to translate a given prior belief transition matrix into the hyper-parameter matrix. It is assumed that this belief matrix corresponds to the mean value of the parameters. Since the relation \[ \text{E}_{\text{prior}} p(s | u) = \frac{\alpha(u, s)}{\alpha(u)} \] holds, the function accepts as input the belief matrix as well as a scaling vector (serves as a proxy for $\alpha(.)$) and proceeds to compute $\alpha(., .)$. Alternatively, the function accepts a data sample and infers the hyper-parameters from it. Since the mode of a parameter (with respect to the prior distribution) is proportional to one less than the corresponding hyper-parameter, we set \[ \alpha(u, s) - 1 = m(u, s) \] where $m(u, s)$ is the $u\rightarrow s$ transition count in the data sample. This is regarded as a 'fake count' which helps $\alpha(u, s)$ to reflect knowledge of the data sample. 
### Usage and examples ```{r loadAndDoExample} weatherStates <- c("sunny", "cloudy", "rain") byRow <- TRUE weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = byRow, nrow = 3, dimnames = list(weatherStates, weatherStates)) mcWeather <- new("markovchain", states = weatherStates, byrow = byRow, transitionMatrix = weatherMatrix, name = "Weather") weathersOfDays <- rmarkovchain(n = 365, object = mcWeather, t0 = "sunny") ``` For the purpose of this section, we shall continue to use the weather of days example introduced in the main vignette of the package (reproduced above for convenience). Let us invoke the fit function to estimate the MAP parameters with 92\% confidence bounds and hyper-parameters as shown below, based on the first 200 days of the weather data. Additionally, let us find out what the probability is of observing the weather data for the next 165 days. The usage would be as follows ```{r MAPFit} hyperMatrix<-matrix(c(1, 1, 2, 3, 2, 1, 2, 2, 3), nrow = 3, byrow = TRUE, dimnames = list(weatherStates,weatherStates)) markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix) ``` The results should not change after permuting the dimensions of the matrix. ```{r MAPFit2} hyperMatrix2<- hyperMatrix[c(2,3,1), c(2,3,1)] markovchainFit(weathersOfDays[1:200], method = "map", confidencelevel = 0.92, hyperparam = hyperMatrix2) predictiveDistribution(weathersOfDays[1:200], weathersOfDays[201:365],hyperparam = hyperMatrix2) ``` Note that the predictive probability is very small. However, this can be useful when comparing model orders. Suppose we have an idea of the (prior) transition matrix corresponding to the expected value of the parameters, and have a data set from which we want to deduce the MAP estimates. 
We can infer the hyper-parameters from this known transition matrix itself, and use this to obtain our MAP estimates. ```{r inferHyperparam} inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) ``` Alternatively, we can use a data sample to infer the hyper-parameters. ```{r inferHyperparam2} inferHyperparam(data = weathersOfDays[1:15]) ``` In order to use the inferred hyper-parameter matrices, we do ```{r inferHyperparam3} hyperMatrix3 <- inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) hyperMatrix3 <- hyperMatrix3$scaledInference hyperMatrix4 <- inferHyperparam(data = weathersOfDays[1:15]) hyperMatrix4 <- hyperMatrix4$dataInference ``` Now we can safely use `hyperMatrix3` and `hyperMatrix4` with `markovchainFit` (in the `hyperparam` argument). Supposing we don't provide any hyper-parameters, then the prior is uniform. This is the same as maximum likelihood. ```{r MAPandMLE} data(preproglucacon) preproglucacon <- preproglucacon[[2]] MLEest <- markovchainFit(preproglucacon, method = "mle") MAPest <- markovchainFit(preproglucacon, method = "map") MLEest$estimate MAPest$estimate ``` # Applications {#sec:applications} This section shows applications of DTMC in various fields. ## Weather forecasting {#app:weather} Markov chains provide a simple model to predict the next day's weather given the current meteorological condition. The first application herewith shown is the "Land of Oz example" from \cite{landOfOz}, the second is the "Alofi Island Rainfall" from \cite{averyHenderson}. ### Land of Oz {#sec:wfLandOfOz} The Land of Oz is acknowledged not to have ideal weather conditions at all: the weather is snowy or rainy very often and, once more, there are never two nice days in a row. Consider three weather states: rainy, nice and snowy. 
Let the transition matrix be as in the following: ```{r weatPred1} mcWP <- new("markovchain", states = c("rainy", "nice", "snowy"), transitionMatrix = matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25,0.25,0.5), byrow = T, nrow = 3)) ``` Given that today it is a nice day, the corresponding stochastic row vector is $w_{0}=(0\:,1\:,0)$ and the forecast after 1, 2 and 3 days are given by ```{r weatPred2} W0 <- t(as.matrix(c(0, 1, 0))) W1 <- W0 * mcWP; W1 W2 <- W0 * (mcWP ^ 2); W2 W3 <- W0 * (mcWP ^ 3); W3 ``` As can be seen from $w_{1}$, if in the Land of Oz today is a nice day, tomorrow it will rain or snow with probability 1. One week later, the prediction can be computed as ```{r weatPred3} W7 <- W0 * (mcWP ^ 7) W7 ``` The steady state of the chain can be computed by means of the `steadyStates` method. ```{r weatPred4} q <- steadyStates(mcWP) q ``` Note that, from the seventh day on, the predicted probabilities are substantially equal to the steady state of the chain and they don't depend from the starting point, as the following code shows. ```{r weatPred5} R0 <- t(as.matrix(c(1, 0, 0))) R7 <- R0 * (mcWP ^ 7); R7 S0 <- t(as.matrix(c(0, 0, 1))) S7 <- S0 * (mcWP ^ 7); S7 ``` ### Alofi Island Rainfall {#sec:wfAlofi} Alofi Island daily rainfall data were recorded from January 1st, 1987 until December 31st, 1989 and classified into three states: "0" (no rain), "1-5" (from non zero until 5 mm) and "6+" (more than 5mm). The corresponding dataset is provided within the \pkg{markovchain} package. ```{r Alofi1} data("rain", package = "markovchain") table(rain$rain) ``` The underlying transition matrix is estimated as follows. ```{r Alofi2} mcAlofi <- markovchainFit(data = rain$rain, name = "Alofi MC")$estimate mcAlofi ``` The long term daily rainfall distribution is obtained by means of the `steadyStates` method. ```{r Alofi3} steadyStates(mcAlofi) ``` ## Finance and Economics {#app:fin} Other relevant applications of DTMC can be found in Finance and Economics. 
### Finance {#fin:fin} Credit ratings transitions have been successfully modeled with discrete time Markov chains. Some rating agencies publish transition matrices that show the empirical transition probabilities across credit ratings. The example that follows comes from \pkg{CreditMetrics} \proglang{R} package \citep{CreditMetricsR}, carrying Standard \& Poor's published data. ```{r ratings1} rc <- c("AAA", "AA", "A", "BBB", "BB", "B", "CCC", "D") creditMatrix <- matrix( c(90.81, 8.33, 0.68, 0.06, 0.08, 0.02, 0.01, 0.01, 0.70, 90.65, 7.79, 0.64, 0.06, 0.13, 0.02, 0.01, 0.09, 2.27, 91.05, 5.52, 0.74, 0.26, 0.01, 0.06, 0.02, 0.33, 5.95, 85.93, 5.30, 1.17, 1.12, 0.18, 0.03, 0.14, 0.67, 7.73, 80.53, 8.84, 1.00, 1.06, 0.01, 0.11, 0.24, 0.43, 6.48, 83.46, 4.07, 5.20, 0.21, 0, 0.22, 1.30, 2.38, 11.24, 64.86, 19.79, 0, 0, 0, 0, 0, 0, 0, 100 )/100, 8, 8, dimnames = list(rc, rc), byrow = TRUE) ``` It is easy to convert such matrices into `markovchain` objects and to perform some analyses ```{r ratings2} creditMc <- new("markovchain", transitionMatrix = creditMatrix, name = "S&P Matrix") absorbingStates(creditMc) ``` ### Economics {#fin:ec} For a recent application of \pkg{markovchain} in Economic, see \cite{manchesterR}. A dynamic system generates two kinds of economic effects \citep{bardPpt}: 1. those incurred when the system is in a specified state, and 2. those incurred when the system makes a transition from one state to another. Let the monetary amount of being in a particular state be represented as a m-dimensional column vector $c^{\rm{S}}$, while let the monetary amount of a transition be embodied in a $C^{R}$ matrix in which each component specifies the monetary amount of going from state i to state j in a single step. Henceforth, Equation \@ref(eq:cost) represents the monetary of being in state $i$. \begin{equation} {c_i} = c_i^{\rm{S}} + \sum\limits_{j = 1}^m {C_{ij}^{\rm{R}}} {p_{ij}}. 
\label{eq:cost}
\end{equation}

Let $\bar c = \left[ c_i \right]$ and let $e_i$ be the vector valued 1 in the initial state and 0 in all others, then, if $f_n$ is the random variable representing the economic return associated with the stochastic process at time $n$, Equation \@ref(eq:return) holds:
\begin{equation}
E\left[ {{f_n}\left( {{X_n}} \right)|{X_0} = i} \right] = {e_i}{P^n}\bar c.
\label{eq:return}
\end{equation}

The following example assumes that a telephone company models the transition probabilities between customer/non-customer status by matrix $P$ and the cost associated with states by matrix $M$.

```{r economicAnalysis1}
statesNames <- c("customer", "non customer")
P <- markovchain:::zeros(2); P[1, 1] <- .9; P[1, 2] <- .1;
P[2, 2] <- .95; P[2, 1] <- .05;
rownames(P) <- statesNames; colnames(P) <- statesNames
mcP <- new("markovchain", transitionMatrix = P, name = "Telephone company")
M <- markovchain:::zeros(2)
M[1, 1] <- -20; M[1, 2] <- -30; M[2, 1] <- -40; M[2, 2] <- 0
```

If the average revenue for existing customer is +100, the cost per state is computed as follows.

```{r economicAnalysis2}
c1 <- 100 + conditionalDistribution(mcP, state = "customer") %*% M[1,]
c2 <- 0 + conditionalDistribution(mcP, state = "non customer") %*% M[2,]
```

For an existing customer, the expected gain (loss) at the fifth year is given by the following code.

```{r economicAnalysis3}
as.numeric((c(1, 0)* mcP ^ 5) %*% (as.vector(c(c1, c2))))
```

## Actuarial science {#app:act}

Markov chains are widely applied in the field of actuarial science. Two classical applications are policyholders' distribution across Bonus Malus classes in Motor Third Party Liability (MTPL) insurance (Section \@ref(sec:bm)) and health insurance pricing and reserving (Section \@ref(sec:hi)).

### MTPL Bonus Malus {#sec:bm}

Bonus Malus (BM) contracts grant the policyholder a discount (or a penalty) as a function of the number of claims in the experience period.
The discount (or penalty) is applied to a premium that already allows for known (a priori) policyholder characteristics \citep{denuit2007actuarial} and it usually depends on vehicle, territory, the demographic profile of the policyholder, and policy coverage depth (deductible and policy limits).\\
Since the proposed BM level depends on the claims in the previous period, it can be modeled by a discrete Markov chain. A very simplified example follows. Assume a BM scale from 1 to 5, where 4 is the starting level. The evolution rules are shown in Equation \@ref(eq:BM):
\begin{equation}
bm_{t + 1} = \max \left( {1,bm_{t} - 1} \right)*\left( {\tilde N = 0} \right) + \min \left( {5,bm_{t} + 2*\tilde N} \right)*\left( {\tilde N \ge 1} \right).
\label{eq:BM}
\end{equation}

The number of claims $\tilde N$ is a random variable that is assumed to be Poisson distributed.

```{r bonusMalus1}
getBonusMalusMarkovChain <- function(lambda) {
  bmMatr <- markovchain:::zeros(5)
  bmMatr[1, 1] <- dpois(x = 0, lambda)
  bmMatr[1, 3] <- dpois(x = 1, lambda)
  bmMatr[1, 5] <- 1 - ppois(q = 1, lambda)
  bmMatr[2, 1] <- dpois(x = 0, lambda)
  bmMatr[2, 4] <- dpois(x = 1, lambda)
  bmMatr[2, 5] <- 1 - ppois(q = 1, lambda)
  bmMatr[3, 2] <- dpois(x = 0, lambda)
  bmMatr[3, 5] <- 1 - dpois(x=0, lambda)
  bmMatr[4, 3] <- dpois(x = 0, lambda)
  bmMatr[4, 5] <- 1 - dpois(x = 0, lambda)
  bmMatr[5, 4] <- dpois(x = 0, lambda)
  bmMatr[5, 5] <- 1 - dpois(x = 0, lambda)
  stateNames <- as.character(1:5)
  out <- new("markovchain", transitionMatrix = bmMatr,
             states = stateNames, name = "BM Matrix")
  return(out)
}
```

Assuming that the a-priori claim frequency per car-year is 0.05 in the class (the class being the group of policyholders that share the same common characteristics), the underlying BM transition matrix and its underlying steady state are as follows.
```{r bonusMalus2} bmMc <- getBonusMalusMarkovChain(0.05) as.numeric(steadyStates(bmMc)) ``` If the underlying BM coefficients of the class are 0.5, 0.7, 0.9, 1.0, 1.25, this means that the average BM coefficient applied on the long run to the class is given by ```{r bonusMalus3} sum(as.numeric(steadyStates(bmMc)) * c(0.5, 0.7, 0.9, 1, 1.25)) ``` This means that the average premium paid by policyholders in the portfolio almost halves in the long run. ### Health insurance example {#sec:hi} Actuaries quantify the risk inherent in insurance contracts evaluating the premium of insurance contract to be sold (therefore covering future risk) and evaluating the actuarial reserves of existing portfolios (the liabilities in terms of benefits or claims payments due to policyholder arising from previously sold contracts), see \cite{deshmukh2012multiple} for details. An applied example can be performed using the data from \cite{de2016assicurazioni} that has been saved in the `exdata` folder. ```{r healthIns6} ltcDemoPath<-system.file("extdata", "ltdItaData.txt", package = "markovchain") ltcDemo<-read.table(file = ltcDemoPath, header=TRUE, sep = ";", dec = ".") head(ltcDemo) ``` The data shows the probability of transition between the state of (A)ctive, to (I)ll and Dead. It is easy to complete the transition matrix. ```{r healthIns7} ltcDemo<-transform(ltcDemo, pIA=0, pII=1-pID, pDD=1, pDA=0, pDI=0) ``` Now we build a function that returns the transition during the $t+1$ th year, assuming that the subject has attained year $t$. 
```{r healthIns8}
possibleStates<-c("A","I","D")
getMc4Age<-function(age) {
  transitionsAtAge<-ltcDemo[ltcDemo$age==age,]
  myTransMatr<-matrix(0, nrow=3,ncol = 3,
                      dimnames = list(possibleStates, possibleStates))
  myTransMatr[1,1]<-transitionsAtAge$pAA[1]
  myTransMatr[1,2]<-transitionsAtAge$pAI[1]
  myTransMatr[1,3]<-transitionsAtAge$pAD[1]
  myTransMatr[2,2]<-transitionsAtAge$pII[1]
  myTransMatr[2,3]<-transitionsAtAge$pID[1]
  myTransMatr[3,3]<-1
  myMc<-new("markovchain", transitionMatrix = myTransMatr,
            states = possibleStates,
            name = paste("Age",age,"transition matrix"))
  return(myMc)
}
```

Because transitions are not homogeneous across ages, we use a `markovchainList` object to describe the transition probabilities for a guy starting at age 100.

```{r healthIns8-prob}
getFullTransitionTable<-function(age){
  ageSequence<-seq(from=age, to=120)
  k=1
  myList=list()
  for ( i in ageSequence) {
    mc_age_i<-getMc4Age(age = i)
    myList[[k]]<-mc_age_i
    k=k+1
  }
  myMarkovChainList<-new("markovchainList", markovchains = myList,
                         name = paste("TransitionsSinceAge", age, sep = ""))
  return(myMarkovChainList)
}
transitionsSince100<-getFullTransitionTable(age=100)
```

We can use such transitions to simulate ten life trajectories for a guy that begins "active" (A) aged 100:

```{r healthIns9}
rmarkovchain(n = 10, object = transitionsSince100,
             what = "matrix", t0 = "A", include.t0 = TRUE)
```

Let's consider 1000 simulated life trajectories, for a healthy guy aged 80. We can compute the expected time a guy will be disabled starting active at age 80.
```{r healthIns10}
transitionsSince80<-getFullTransitionTable(age=80)
lifeTrajectories<-rmarkovchain(n=1e3, object=transitionsSince80,
                               what="matrix",t0="A",include.t0=TRUE)
temp<-matrix(0,nrow=nrow(lifeTrajectories),ncol = ncol(lifeTrajectories))
temp[lifeTrajectories=="I"]<-1
expected_period_disabled<-mean(rowSums((temp)))
expected_period_disabled
```

Assuming that the health insurance will pay a benefit of 12000 per year disabled and that the real interest rate is 0.02, we can compute the lump sum premium at 80.

```{r healthIns11}
mean(rowMeans(12000*temp%*%( matrix((1+0.02)^-seq(from=0, to=ncol(temp)-1)))))
```

## Sociology {#app:sociology}

Markov chains have been actively used to model progressions and regressions between social classes. The first study was performed by \cite{glassHall}, while a more recent application can be found in \cite{blandenEtAlii}. The table that follows shows the income quartile of the father when the son was 16 (in 1984) and the income quartile of the son when aged 30 (in 2000) for the 1970 cohort.

```{r blandenEtAlii}
data("blanden")
mobilityMc <- as(blanden, "markovchain")
mobilityMc
```

The underlying transition graph is plotted in Figure \@ref(fig:mobility).

```{r mobility, fig=TRUE, echo=FALSE, fig.align='center', fig.cap="1970 UK cohort mobility data."}
plot(mobilityMc, main = '1970 mobility',vertex.label.cex = 2,
     layout = layout.fruchterman.reingold)
```

The steady state distribution is computed as follows. Since transitions across quartiles are shown, the steady state probability function is evenly 0.25.

```{r blandenEtAlii3}
round(steadyStates(mobilityMc), 2)
```

## Genetics and Medicine {#sec:gen}

This section contains two examples: the first shows the use of Markov chain models in genetics, the second shows an application of Markov chains in modelling diseases' dynamics.

### Genetics {#sec:genetics}

\cite{averyHenderson} discusses the use of Markov chains in modelling the preproglucacon gene protein bases sequence.
The `preproglucacon` dataset in \pkg{markovchain} contains the dataset shown in the package. ```{r preproglucacon1} data("preproglucacon", package = "markovchain") ``` It is possible to model the transition probabilities between bases as shown in the following code. ```{r preproglucacon2} mcProtein <- markovchainFit(preproglucacon$preproglucacon, name = "Preproglucacon MC")$estimate mcProtein ``` ### Medicine {#sec:medicine} Discrete-time Markov chains are also employed to study the progression of chronic diseases. The following example is taken from \cite{craigSendi}. Starting from six month follow-up data, the maximum likelihood estimation of the monthly transition matrix is obtained. This transition matrix aims to describe the monthly progression of CD4-cell counts of HIV infected subjects. ```{r epid1} craigSendiMatr <- matrix(c(682, 33, 25, 154, 64, 47, 19, 19, 43), byrow = T, nrow = 3) hivStates <- c("0-49", "50-74", "75-UP") rownames(craigSendiMatr) <- hivStates colnames(craigSendiMatr) <- hivStates craigSendiTable <- as.table(craigSendiMatr) mcM6 <- as(craigSendiTable, "markovchain") mcM6@name <- "Zero-Six month CD4 cells transition" mcM6 ``` As shown in the paper, the second passage consists in the decomposition of $M_{6}=V \cdot D \cdot V^{-1}$ in order to obtain $M_{1}$ as $M_{1}=V \cdot D^{1/6} \cdot V^{-1}$ . ```{r epid2} eig <- eigen(mcM6@transitionMatrix) D <- diag(eig$values) ``` ```{r epid3} V <- eig$vectors V %*% D %*% solve(V) d <- D ^ (1/6) M <- V %*% d %*% solve(V) mcM1 <- new("markovchain", transitionMatrix = M, states = hivStates) ``` # Discussion, issues and future plans The \pkg{markovchain} package has been designed in order to provide easily handling of DTMC and communication with alternative packages. The package has known several improvements in the recent years: many functions added, porting the software in Rcpp \pkg{Rcpp} package \citep{RcppR} and many methodological improvements that have improved the software reliability. 
# Acknowledgments {#sec:aknowledgements} The package was selected for Google Summer of Code 2015 support. The authors wish to thank Michael Cole, Tobi Gutman and Mildenberger Thoralf for their suggestions and bug checks. A final thanks also to Dr. Simona C. Minotti and Dr. Mirko Signorelli for their support in drafting this version of the vignettes. \clearpage # References # markovchain/inst/extdata/0000755000176200001440000000000015137702633015172 5ustar liggesusersmarkovchain/inst/extdata/ltdItaData.txt0000644000176200001440000001754415137702633017761 0ustar liggesusersage;pAD;pID;pAI;pAA 20;0.000461600217312132;0.0108336431456553;0.000176246694342926;0.999362153088345 21;0.000482488764261525;0.0107971909886194;0.000171057735612955;0.999346453500126 22;0.000494993811957668;0.0117707568980395;0.00015923328063064;0.999345772907412 23;0.00050429345591411;0.0115939415352431;0.00016057305104192;0.999335133493044 24;0.000507419256318309;0.0126057434590121;0.000160650439141657;0.99933193030454 25;0.000515426695613004;0.0152636383159172;0.000164360322575267;0.999320212981812 26;0.000525595649643117;0.0179897694669832;0.000161660453870113;0.999312743896487 27;0.000534823584740014;0.0166832171238458;0.000160135991146609;0.999305040424113 28;0.000537542563101855;0.0170682636815637;0.000159910891995352;0.999302546544903 29;0.00054010571047368;0.0189681430662499;0.000154068989936519;0.99930582529959 30;0.000540223049860296;0.0211937258794527;0.000157160647241465;0.999302616302898 31;0.000543650098790082;0.0273283049097408;0.000166920507275799;0.999289429393934 32;0.000550657084106076;0.0297242338721561;0.000176149326345398;0.999273193589549 33;0.000567666524533948;0.0312031127015988;0.000174959290838917;0.999257374184627 34;0.000589564479997331;0.0362614421255913;0.000181142949631881;0.999229292570371 35;0.00061806212917393;0.0400308116670889;0.000191872923626047;0.9991900649472 36;0.000650632511832817;0.0418389766775716;0.000199643253754878;0.999149724234412 
37;0.000689474293782214;0.0467358834902955;0.000211977632924897;0.999098548073293 38;0.000731138219342785;0.0523964586730976;0.000221795302027688;0.99904706647863 39;0.000793533578699302;0.055177966832089;0.000240416543812767;0.998966049877488 40;0.000859207185781173;0.0585124067163057;0.000264674910015306;0.998876117904203 41;0.000920725621476301;0.0679656015534935;0.000282355739283444;0.99879691863924 42;0.000984991230447042;0.0757310812483226;0.000314702144743578;0.998700306624809 43;0.00105939521363715;0.0811703192912012;0.000349094911367465;0.998591509874995 44;0.00115292666392253;0.086792620615223;0.000376750745815673;0.998470322590262 45;0.00124967505305504;0.0951282991008114;0.000410031980278301;0.998340292966667 46;0.00134617679388282;0.104306046855522;0.000454381385459602;0.998199441820658 47;0.00147744439982092;0.108863822493192;0.000510090216360853;0.998012465383818 48;0.00160678108915558;0.115727419192654;0.000572265035829771;0.997820953875015 49;0.00175231797005106;0.122620733316479;0.000653974571998273;0.997593707457951 50;0.00192647416205334;0.128264864604654;0.000724389338373027;0.997349136499574 51;0.00211873152640448;0.132487558117908;0.00080347791835564;0.99707779055524 52;0.00231715656887456;0.140903426576919;0.000897377523639647;0.996785465907486 53;0.00257005954245349;0.148308971347905;0.000988973589213584;0.996440966868333 54;0.00282270426395954;0.152694601283312;0.00110175953341175;0.996075536202629 55;0.00308892793456527;0.156014449299285;0.00121842271667517;0.99569264934876 56;0.00334491994186366;0.160354906530207;0.00135221310972041;0.995302866948416 57;0.0036680988581455;0.163584432824716;0.0015021237857599;0.994829777356095 58;0.00404383426827132;0.166621852151551;0.00164389926056965;0.994312266471159 59;0.00446410949795472;0.172995362892957;0.00181867939206979;0.993717211109975 60;0.00491071415431949;0.175114162729557;0.00203795753783481;0.993051328307846 61;0.00541897972205418;0.176836610548664;0.00223079594105024;0.992350224336896 
62;0.0058872972394518;0.179065380547063;0.00243327615598675;0.991679426604561 63;0.00635426205223726;0.18328905395475;0.00268594485354049;0.990959793094222 64;0.00684249456592532;0.186309865207791;0.00299675785270434;0.99016074758137 65;0.00745741108087491;0.18514690618557;0.0034276558099294;0.989114933109196 66;0.00813029947746983;0.186388327973927;0.00401498471329922;0.987854715809231 67;0.00894480989820999;0.189665426363304;0.00467257950797381;0.986382610593816 68;0.0097208540778663;0.190052370707222;0.00533807252107788;0.984941073401056 69;0.0106145155277237;0.188697039510498;0.0060901494087728;0.983295335063503 70;0.0115815669321884;0.190655935136829;0.00698624429053202;0.98143218877728 71;0.0126738410165432;0.193198718680596;0.0080193455961521;0.979306813387305 72;0.0137706103972485;0.194509663180914;0.00912917559527522;0.977100214007476 73;0.0151133228338427;0.197263891809617;0.0104517574376933;0.974434919728464 74;0.0167023100407502;0.19981346841012;0.011975363077487;0.971322326881763 75;0.0185015234683037;0.202602461969921;0.0136406055721243;0.967857870959572 76;0.020543474835099;0.20567144487397;0.0155521514540999;0.963904373710801 77;0.0226990867632567;0.209583024789031;0.0176773816301192;0.959623531606624 78;0.025242012724978;0.216536798215654;0.0199840051708029;0.954773982104219 79;0.0280714682899232;0.222812914664102;0.0225820740028015;0.949346457707275 80;0.0313087874273656;0.229026086379995;0.0254147939549998;0.943276418617635 81;0.0347623037317666;0.237135642759773;0.0283027615785079;0.936934934689726 82;0.0377325549166625;0.246569096453296;0.0313008704863735;0.930966574596964 83;0.0415709360291018;0.255103424222878;0.0350322493312845;0.923396814639614 84;0.0466891630019649;0.263805059072913;0.0392249475621001;0.914085889435935 85;0.0523809285174003;0.275827261187502;0.0435063497564212;0.904112721726179 86;0.0595353395950259;0.287154077419303;0.0477809616106315;0.892683698794343 
87;0.0667672618557459;0.296736437668921;0.0520193008104549;0.881213437333799 88;0.0742284842880654;0.307394243456317;0.0567867940499367;0.868984721661998 89;0.08102260498543;0.318807087201621;0.0617441024832344;0.857233292531336 90;0.0879638443512971;0.330335659703562;0.0647086526386771;0.847327503010026 91;0.0956534410840304;0.34429171728002;0.0667471331812722;0.837599425734697 92;0.104629865442421;0.360343939150447;0.0679974373329284;0.827372697224651 93;0.114342159249409;0.376714600886086;0.0692694237579942;0.816388416992597 94;0.124830307842353;0.39337161823954;0.0705634029519849;0.804606289205662 95;0.136132619794602;0.410280336624487;0.0718796873808002;0.791987692824598 96;0.148284870965249;0.427403785267262;0.0732185913907422;0.778496537644009 97;0.16131934887796;0.44470297533411;0.0745804311136826;0.764100220008357 98;0.175263808419464;0.46213723752253;0.0759655243672633;0.748770667213273 99;0.190140356683496;0.479664593311152;0.0773741905500132;0.732485452766491 100;0.205964292416731;0.49724215292386;0.0788067505312643;0.715228957052005 101;0.222742933595137;0.514826532138116;0.0802635265357542;0.696993539869109 102;0.240474474656817;0.532374279403284;0.0817448420228012;0.677780683320382 103;0.259146922168122;0.549842304367436;0.0832510215599426;0.657602056271935 104;0.278737163401518;0.567188298860823;0.0847823906909263;0.636480445907556 105;0.299210225581725;0.584371141653351;0.0863392757979522;0.614450498620323 106;0.320518783551316;0.601351278877663;0.087922003958059;0.591559212490625 107;0.342602969587468;0.618091072857778;0.089530902793558;0.567866127618974 108;0.365390530594804;0.634555113161069;0.0911663003164189;0.543443169088777 109;0.388797364816074;0.650710484943252;0.0928285247665155;0.51837411041741 110;0.412728452936279;0.666526991020167;0.0945179044436453;0.492753642620076 111;0.437079177930066;0.681977325511973;0.0962347675332406;0.466686054536693 112;0.461737005643692;0.69703719830248;0.0979794419256961;0.440283552430612 
113;0.486583475732349;0.711685410881454;0.099752255029241;0.41366426923841 114;0.511496432215896;0.725903885342153;0.101553533576292;0.386950034207812 115;0.536352406554738;0.739677649351817;0.103383603423233;0.360263990022029 116;0.561029055458654;0.752994780772872;0.105242789343564;0.333728155197782 117;0.585407551755433;0.765846316272406;0.107131414814384;0.307461033430183 118;0.609374829980436;0.778226128713706;0.109049801796177;0.281575368223387 119;0.632825598544515;0.790130778382749;0.110998270505863;0.256176130949622 120;0.655664046315679;0.801559343179304;0.112977139183108;0.231358814501213 121;1;1;0;0 markovchain/README.md0000644000176200001440000000226115137702633014043 0ustar liggesusers# markovchain R package providing classes, methods and function for easily handling Discrete Time Markov Chains (DTMC), performing probabilistic analysis and fitting. ## Install the current release from CRAN: ```r install.packages('markovchain') ``` ## Install the development version from GitHub: ```r devtools::install_github('spedygiorgio/markovchain') ``` ![alt tag](https://travis-ci.org/spedygiorgio/markovchain.svg?branch=master) [![Downloads](http://cranlogs.r-pkg.org/badges/markovchain)](https://cran.r-project.org/package=markovchain) [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/markovchain)](https://cran.r-project.org/package=markovchain) [![Research software impact](http://depsy.org/api/package/cran/markovchain/badge.svg)](http://depsy.org/package/r/markovchain) [![R-CMD-check](https://github.com/spedygiorgio/markovchain/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/spedygiorgio/markovchain/actions/workflows/R-CMD-check.yaml) [![Codecov test coverage](https://codecov.io/gh/spedygiorgio/markovchain/graph/badge.svg)](https://app.codecov.io/gh/spedygiorgio/markovchain) markovchain/build/0000755000176200001440000000000015137710471013661 5ustar liggesusersmarkovchain/build/vignette.rds0000644000176200001440000000052415137710471016221 
0ustar liggesusersK0dzn CAD<y8 o!4i&%6kMDQxK^ޏ68ꎴor1 P1(r&aG9g8sFL~\]_^ LV8aqB%h۴Bu&kUla[*[ț;!—yQ EobcbrkXsǺ'l89DkR?0:m;0{0MQ-h3Z>GoR0}L͵gi&4إm?&/քR,+SڔeNDqHA$uGUmarkovchain/man/0000755000176200001440000000000015137710352013333 5ustar liggesusersmarkovchain/man/predictHommc.Rd0000644000176200001440000000162015137702633016242 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{predictHommc} \alias{predictHommc} \title{Simulate a higher order multivariate markovchain} \usage{ predictHommc(hommc,t,init) } \arguments{ \item{hommc}{a hommc-class object} \item{t}{no of iterations to predict} \item{init}{matrix of previous states size of which depends on hommc} } \value{ The function returns a matrix of size s X t displaying t predicted states in each row coressponding to every categorical sequence. } \description{ This function provides a prediction of states for a higher order multivariate markovchain object } \details{ The user is required to provide a matrix of giving n previous coressponding every categorical sequence. Dimensions of the init are s X n, where s is number of categorical sequences and n is order of the homc. } \author{ Vandit Jain } markovchain/man/markovchain-package.Rd0000644000176200001440000000330615137710352017517 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/markovchain.R \docType{package} \name{markovchain-package} \alias{markovchain} \alias{markovchain-package} \title{Easy Handling Discrete Time Markov Chains} \description{ The package contains classes and method to create and manage (plot, print, export for example) discrete time Markov chains (DTMC). 
In addition it provide functions to perform statistical (fitting and drawing random variates) and probabilistic (analysis of DTMC proprieties) analysis } \examples{ # create some markov chains statesNames=c("a","b") mcA<-new("markovchain", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9),byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames))) statesNames=c("a","b","c") mcB<-new("markovchain", states=statesNames, transitionMatrix= matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, byrow=TRUE, dimnames=list(statesNames, statesNames))) statesNames=c("a","b","c","d") matrice<-matrix(c(0.25,0.75,0,0,0.4,0.6,0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) mcC<-new("markovchain", states=statesNames, transitionMatrix=matrice) mcD<-new("markovchain", transitionMatrix=matrix(c(0,1,0,1), nrow=2,byrow=TRUE)) #operations with S4 methods mcA^2 steadyStates(mcB) absorbingStates(mcB) markovchainSequence(n=20, markovchain=mcC, include=TRUE) } \references{ Discrete-Time Markov Models, Bremaud, Springer 1999 } \seealso{ Useful links: \itemize{ \item \url{https://github.com/spedygiorgio/markovchain/} \item Report bugs at \url{https://github.com/spedygiorgio/markovchain/issues} } } \author{ Giorgio Alfredo Spedicato Maintainer: Giorgio Alfredo Spedicato } \keyword{package} markovchain/man/preproglucacon.Rd0000644000176200001440000000152215137702633016650 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{preproglucacon} \alias{preproglucacon} \title{Preprogluccacon DNA protein bases sequences} \format{ A data frame with 1572 observations on the following 2 variables. 
\describe{ \item{\code{V1}}{a numeric vector, showing original coding} \item{\code{preproglucacon}}{a character vector, showing initial of DNA bases (Adenine, Cytosine, Guanine, Thymine)} } } \source{ Avery Henderson } \usage{ data(preproglucacon) } \description{ Sequence of bases for preproglucacon DNA protein } \examples{ data(preproglucacon) preproglucaconMc<-markovchainFit(data=preproglucacon$preproglucacon) } \references{ Averuy Henderson, Fitting markov chain models on discrete time series such as DNA sequences } \keyword{datasets} markovchain/man/setName.Rd0000644000176200001440000000156115137702633015224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{name<-} \alias{name<-} \alias{name<-,markovchain-method} \title{Method to set name of markovchain object} \usage{ name(object) <- value \S4method{name}{markovchain}(object) <- value } \arguments{ \item{object}{A markovchain object} \item{value}{New name of markovchain object} } \description{ This method modifies the existing name of markovchain object } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) name(markovB) <- "dangerous mc" } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/noofVisitsDist.Rd0000644000176200001440000000201115137702633016606 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{noofVisitsDist} \alias{noofVisitsDist} \title{return a joint pdf of the number of visits to the various states of the DTMC} \usage{ noofVisitsDist(markovchain,N,state) } \arguments{ \item{markovchain}{a markovchain-class object} \item{N}{no of steps} \item{state}{the initial state} } \value{ a numeric vector depicting the above described probability density 
function. } \description{ This function would return a joint pdf of the number of visits to the various states of the DTMC during the first N steps. } \details{ This function would return a joint pdf of the number of visits to the various states of the DTMC during the first N steps. } \examples{ transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") noofVisitsDist(simpleMc,5,"a") } \author{ Vandit Jain } markovchain/man/generatorToTransitionMatrix.Rd0000644000176200001440000000201415137702633021353 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{generatorToTransitionMatrix} \alias{generatorToTransitionMatrix} \title{Function to obtain the transition matrix from the generator} \usage{ generatorToTransitionMatrix(gen, byrow = TRUE) } \arguments{ \item{gen}{The generator matrix} \item{byrow}{Flag to determine if rows (columns) sum to 0} } \value{ Returns the transition matrix. } \description{ The transition matrix of the embedded DTMC is inferred from the CTMC's generator } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) generatorToTransitionMatrix(gen) } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. 
Anderson, University of Wisconsin at Madison } \seealso{ \code{\link{rctmc}},\code{\link{ctmc-class}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/is.accessible.Rd0000644000176200001440000000251615137702633016340 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.accessible} \alias{is.accessible} \title{Verify if a state j is reachable from state i.} \usage{ is.accessible(object, from, to) } \arguments{ \item{object}{A \code{markovchain} object.} \item{from}{The name of state "i" (beginning state).} \item{to}{The name of state "j" (ending state).} } \value{ A boolean value. } \description{ This function verifies if a state is reachable from another, i.e., if there exists a path that leads to state j leaving from state i with positive probability } \details{ It wraps an internal function named \code{reachabilityMatrix}. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) ) ) is.accessible(markovB, "a", "c") } \references{ James Montgomery, University of Madison } \seealso{ \code{is.irreducible} } \author{ Giorgio Spedicato, Ignacio Cordón } markovchain/man/impreciseProbabilityatT.Rd0000644000176200001440000000217215137702633020461 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{impreciseProbabilityatT} \alias{impreciseProbabilityatT} \title{Calculating full conditional probability using lower rate transition matrix} \usage{ impreciseProbabilityatT(C,i,t,s,error,useRCpp) } \arguments{ \item{C}{a ictmc class object} \item{i}{initial state at time t} \item{t}{initial time t. Default value = 0} \item{s}{final time} \item{error}{error rate. 
Default value = 0.001} \item{useRCpp}{logical whether to use RCpp implementation; by default TRUE} } \description{ This function calculates full conditional probability at given time s using lower rate transition matrix } \examples{ states <- c("n","y") Q <- matrix(c(-1,1,1,-1),nrow = 2,byrow = TRUE,dimnames = list(states,states)) range <- matrix(c(1/52,3/52,1/2,2),nrow = 2,byrow = 2) name <- "testictmc" ictmc <- new("ictmc",states = states,Q = Q,range = range,name = name) impreciseProbabilityatT(ictmc,2,0,1,10^-3,TRUE) } \references{ Imprecise Continuous-Time Markov Chains, Thomas Krak et al., 2016 } \author{ Vandit Jain } markovchain/man/markovchainSequence.Rd0000644000176200001440000000261415137702633017623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{markovchainSequence} \alias{markovchainSequence} \title{Function to generate a sequence of states from homogeneous Markov chains.} \usage{ markovchainSequence( n, markovchain, t0 = sample(markovchain@states, 1), include.t0 = FALSE, useRCpp = TRUE ) } \arguments{ \item{n}{Sample size} \item{markovchain}{\code{markovchain} object} \item{t0}{The initial state} \item{include.t0}{Specify if the initial state shall be used} \item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.} } \value{ A Character Vector } \description{ Provided any \code{markovchain} object, it returns a sequence of states coming from the underlying stationary distribution. } \details{ A sequence of size n is sampled. 
} \examples{ # define the markovchain object statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) # show the sequence outs <- markovchainSequence(n = 100, markovchain = mcB, t0 = "a") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainFit}} } \author{ Giorgio Spedicato } markovchain/man/firstPassage.Rd0000644000176200001440000000205315137702633016260 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{firstPassage} \alias{firstPassage} \title{First passage across states} \usage{ firstPassage(object, state, n) } \arguments{ \item{object}{A \code{markovchain} object} \item{state}{Initial state} \item{n}{Number of rows on which compute the distribution} } \value{ A matrix of size 1:n x number of states showing the probability of the first time of passage in states to be exactly the number in the row. 
} \description{ This function compute the first passage probability in states } \details{ Based on Feres' Matlab listings } \examples{ simpleMc <- new("markovchain", states = c("a", "b"), transitionMatrix = matrix(c(0.4, 0.6, .3, .7), nrow = 2, byrow = TRUE)) firstPassage(simpleMc, "b", 20) } \references{ Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains } \seealso{ \code{\link{conditionalDistribution}} } \author{ Giorgio Spedicato } markovchain/man/fitHigherOrder.Rd0000644000176200001440000000222015137702633016526 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitHigherOrder.R \name{fitHigherOrder} \alias{fitHigherOrder} \alias{seq2freqProb} \alias{seq2matHigh} \title{Functions to fit a higher order Markov chain} \usage{ fitHigherOrder(sequence, order = 2) seq2freqProb(sequence) seq2matHigh(sequence, order) } \arguments{ \item{sequence}{A character list.} \item{order}{Markov chain order} } \value{ A list containing lambda, Q, and X. } \description{ Given a sequence of states arising from a stationary state, it fits the underlying Markov chain distribution with higher order. } \examples{ sequence<-c("a", "a", "b", "b", "a", "c", "b", "a", "b", "c", "a", "b", "c", "a", "b", "c", "a", "b", "a", "b") fitHigherOrder(sequence) } \references{ Ching, W. K., Huang, X., Ng, M. K., & Siu, T. K. (2013). Higher-order markov chains. In Markov Chains (pp. 141-176). Springer US. Ching, W. K., Ng, M. K., & Fung, E. S. (2008). Higher-order multivariate Markov chains and their applications. Linear Algebra and its Applications, 428(2), 492-507. 
} \author{ Giorgio Spedicato, Tae Seung Kang } markovchain/man/HigherOrderMarkovChain-class.Rd0000644000176200001440000000047515137702633021263 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fitHigherOrder.R \docType{class} \name{HigherOrderMarkovChain-class} \alias{HigherOrderMarkovChain-class} \title{Higher order Markov Chains class} \description{ The S4 class that describes \code{HigherOrderMarkovChain} objects. } markovchain/man/names.Rd0000644000176200001440000000061215137702633014727 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{names,markovchain-method} \alias{names,markovchain-method} \title{Returns the states for a Markov chain object} \usage{ \S4method{names}{markovchain}(x) } \arguments{ \item{x}{object we want to return states for} } \description{ Returns the states for a Markov chain object } markovchain/man/states.Rd0000644000176200001440000000177415137702633015141 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{states} \alias{states} \alias{states,markovchain-method} \title{Defined states of a transition matrix} \usage{ states(object) \S4method{states}{markovchain}(object) } \arguments{ \item{object}{A discrete \code{markovchain} object} } \value{ The character vector corresponding to states slot. } \description{ This method returns the states of a transition matrix. 
} \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) states(markovB) names(markovB) } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/holson.Rd0000644000176200001440000000236015137702633015130 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{holson} \alias{holson} \title{Holson data set} \format{ A data frame with 1000 observations on the following 12 variables. \describe{ \item{\code{id}}{unique id} \item{\code{time1}}{observed status at i-th time} \item{\code{time2}}{observed status at i-th time} \item{\code{time3}}{observed status at i-th time} \item{\code{time4}}{observed status at i-th time} \item{\code{time5}}{observed status at i-th time} \item{\code{time6}}{observed status at i-th time} \item{\code{time7}}{observed status at i-th time} \item{\code{time8}}{observed status at i-th time} \item{\code{time9}}{observed status at i-th time} \item{\code{time10}}{observed status at i-th time} \item{\code{time11}}{observed status at i-th time} } } \source{ Private communications } \usage{ data(holson) } \description{ A data set containing 1000 life histories trajectories and a categorical status (1,2,3) observed on eleven evenly spaced steps. } \details{ The example can be used to fit a \code{markovchain} or a \code{markovchainList} object. 
} \examples{ data(holson) head(holson) } \references{ Private communications } \keyword{datasets} markovchain/man/sales.Rd0000644000176200001440000000131715137702633014736 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{sales} \alias{sales} \title{Sales Demand Sequences} \format{ An object of class \code{matrix} (inherits from \code{array}) with 269 rows and 5 columns. } \usage{ data("sales") } \description{ Sales demand sequences of five products (A, B, C, D, E). Each row corresponds to a sequence. First row corresponds to Sequence A, Second row to Sequence B and so on. } \details{ The example can be used to fit High order multivariate markov chain. } \examples{ data("sales") # fitHighOrderMultivarMC(seqMat = sales, order = 2, Norm = 2) } \keyword{datasets} markovchain/man/priorDistribution.Rd0000644000176200001440000000372015137702633017362 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{priorDistribution} \alias{priorDistribution} \title{priorDistribution} \usage{ priorDistribution(transMatr, hyperparam = matrix()) } \arguments{ \item{transMatr}{The transition matrix whose probability is the parameter of interest.} \item{hyperparam}{The hyperparam matrix (optional). If not provided, a default value of 1 is assumed for each and therefore the resulting probability distribution is uniform.} } \value{ The log of the probabilities for each state is returned in a numeric vector. Each number in the vector represents the probability (log) of having a probability transition vector as specified in corresponding the row of the transition matrix. } \description{ Function to evaluate the prior probability of a transition matrix. It is based on conjugate priors and therefore a Dirichlet distribution is used to model the transitions of each state. 
} \details{ The states (dimnames) of the transition matrix and the hyperparam may be in any order. } \note{ This function can be used in conjunction with inferHyperparam. For example, if the user has a prior data set and a prior transition matrix, he can infer the hyperparameters using inferHyperparam and then compute the probability of their prior matrix using the inferred hyperparameters with priorDistribution. } \examples{ priorDistribution(matrix(c(0.5, 0.5, 0.5, 0.5), nrow = 2, dimnames = list(c("a", "b"), c("a", "b"))), matrix(c(2, 2, 2, 2), nrow = 2, dimnames = list(c("a", "b"), c("a", "b")))) } \references{ Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{predictiveDistribution}}, \code{\link{inferHyperparam}} } \author{ Sai Bhargav Yalamanchi, Giorgio Spedicato } markovchain/man/conditionalDistribution.Rd0000644000176200001440000000213015137702633020524 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{conditionalDistribution} \alias{conditionalDistribution} \title{\code{conditionalDistribution} of a Markov Chain} \usage{ conditionalDistribution(object, state) } \arguments{ \item{object}{A \code{markovchain} object.} \item{state}{Subsequent state.} } \value{ A named probability vector } \description{ It extracts the conditional distribution of the subsequent state, given current state. 
} \examples{ # define a markov chain statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1),nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) conditionalDistribution(markovB, "b") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/rain.Rd0000644000176200001440000000131315137702633014554 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{rain} \alias{rain} \title{Alofi island daily rainfall} \format{ A data frame with 1096 observations on the following 2 variables. \describe{ \item{\code{V1}}{a numeric vector, showing original coding} \item{\code{rain}}{a character vector, showing daily rainfall millilitres brackets} } } \source{ Avery Henderson } \usage{ data(rain) } \description{ Rainfall measured in Alofi Island } \examples{ data(rain) rainMc<-markovchainFit(data=rain$rain) } \references{ Avery Henderson, Fitting markov chain models on discrete time series such as DNA sequences } \keyword{datasets} markovchain/man/statisticalTests.Rd0000644000176200001440000000564715137702633017210 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/statisticalTests.R \name{verifyMarkovProperty} \alias{verifyMarkovProperty} \alias{assessOrder} \alias{assessStationarity} \alias{verifyEmpiricalToTheoretical} \alias{verifyHomogeneity} \title{Various functions to perform statistical inference of DTMC} \usage{ verifyMarkovProperty(sequence, verbose = TRUE) assessOrder(sequence, verbose = TRUE) assessStationarity(sequence, nblocks, verbose = TRUE) verifyEmpiricalToTheoretical(data, object, verbose = TRUE) verifyHomogeneity(inputList, verbose = TRUE) } \arguments{ \item{sequence}{An empirical sequence.} 
\item{verbose}{Should test results be printed out?} \item{nblocks}{Number of blocks.} \item{data}{matrix, character or list to be converted in a raw transition matrix} \item{object}{a markovchain object} \item{inputList}{A list of items that can coerced to transition matrices} } \value{ Verification result a list with following slots: statistic (the chi - square statistic), dof (degrees of freedom), and corresponding p-value. In case a cell in the empirical transition matrix is >0 that is 0 in the theoretical transition matrix the null hypothesis is rejected. In that case a p-value of 0 and statistic and dof of NA are returned. a list of transition matrices? } \description{ These functions verify the Markov property, assess the order and stationarity of the Markov chain. This function tests whether an empirical transition matrix is statistically compatible with a theoretical one. It is a chi-square based test. In case a cell in the empirical transition matrix is >0 that is 0 in the theoretical transition matrix the null hypothesis is rejected. 
Verifies that the s elements in the input list belongs to the same DTMC } \examples{ sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit <- markovchainFit(data = sequence, byrow = FALSE) verifyMarkovProperty(sequence) assessOrder(sequence) assessStationarity(sequence, 1) #Example taken from Kullback Kupperman Tests for Contingency Tables and Markov Chains sequence<-c(0,1,2,2,1,0,0,0,0,0,0,1,2,2,2,1,0,0,1,0,0,0,0,0,0,1,1, 2,0,0,2,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,0,0,0,0,2,1,0, 0,2,1,0,0,0,0,0,0,1,1,1,2,2,0,0,2,1,1,1,1,2,1,1,1,1,1,1,1,1,1,0,2, 0,1,1,0,0,0,1,2,2,0,0,0,0,0,0,2,2,2,1,1,1,1,0,1,1,1,1,0,0,2,1,1, 0,0,0,0,0,2,2,1,1,1,1,1,2,1,2,0,0,0,1,2,2,2,0,0,0,1,1) mc=matrix(c(5/8,1/4,1/8,1/4,1/2,1/4,1/4,3/8,3/8),byrow=TRUE, nrow=3) rownames(mc)<-colnames(mc)<-0:2; theoreticalMc<-as(mc, "markovchain") verifyEmpiricalToTheoretical(data=sequence,object=theoreticalMc) data(kullback) verifyHomogeneity(inputList=kullback,verbose=TRUE) } \references{ Anderson and Goodman. } \seealso{ \code{markovchain} } \author{ Tae Seung Kang, Giorgio Alfredo Spedicato } \concept{statisticalTests} markovchain/man/is.CTMCirreducible.Rd0000644000176200001440000000165615137702633017207 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{is.CTMCirreducible} \alias{is.CTMCirreducible} \title{Check if CTMC is irreducible} \usage{ is.CTMCirreducible(ctmc) } \arguments{ \item{ctmc}{a ctmc-class object} } \value{ a boolean value as described above. 
} \description{ This function verifies whether a CTMC object is irreducible } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.CTMCirreducible(molecularCTMC) } \references{ Continuous-Time Markov Chains, Karl Sigman, Columbia University } \author{ Vandit Jain } markovchain/man/craigsendi.Rd0000644000176200001440000000165215137702633015741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{craigsendi} \alias{craigsendi} \title{CD4 cells counts on HIV Infects between zero and six month} \format{ The format is: table [1:3, 1:3] 682 154 19 33 64 19 25 47 43 - attr(*, "dimnames")=List of 2 ..$ : chr [1:3] "0-49" "50-74" "75-UP" ..$ : chr [1:3] "0-49" "50-74" "75-UP" } \source{ Estimation of the transition matrix of a discrete time Markov chain, Bruce A. Craig and Peter P. Sendi, Health Economics 11, 2002. } \usage{ data(craigsendi) } \description{ This is the table shown in Craig and Sendi paper showing zero and six month CD4 cells count in six brakets } \details{ Rows represent counts at the beginning, cols represent counts after six months. 
} \examples{ data(craigsendi) csMc<-as(craigsendi, "markovchain") steadyStates(csMc) } \references{ see source } \keyword{datasets} markovchain/man/absorptionProbabilities.Rd0000644000176200001440000000222115137702633020513 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{absorptionProbabilities} \alias{absorptionProbabilities} \title{Absorption probabilities} \usage{ absorptionProbabilities(object) } \arguments{ \item{object}{the markovchain object} } \value{ A named vector with the expected number of steps to go from a transient state to any of the recurrent ones } \description{ Computes the absorption probability from each transient state to each recurrent one (i.e. the (i, j) entry or (j, i), in a stochastic matrix by columns, represents the probability that the first not transient state we can go from the transient state i is j (and therefore we are going to be absorbed in the communicating recurrent class of j) } \examples{ m <- matrix(c(1/2, 1/2, 0, 1/2, 1/2, 0, 0, 1/2, 1/2), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) absorptionProbabilities(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. 
} \author{ Ignacio Cordón } markovchain/man/rctmc.Rd0000644000176200001440000000341615137702633014741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{rctmc} \alias{rctmc} \title{rctmc} \usage{ rctmc(n, ctmc, initDist = numeric(), T = 0, include.T0 = TRUE, out.type = "list") } \arguments{ \item{n}{The number of samples to generate.} \item{ctmc}{The CTMC S4 object.} \item{initDist}{The initial distribution of states.} \item{T}{The time up to which the simulation runs (all transitions after time T are not returned).} \item{include.T0}{Flag to determine if start state is to be included.} \item{out.type}{"list" or "df"} } \value{ Based on out.type, a list or a data frame is returned. The returned list has two elements - a character vector (states) and a numeric vector (indicating time of transitions). The data frame is similarly structured. } \description{ The function generates random CTMC transitions as per the provided generator matrix. } \details{ In order to use the T0 argument, set n to Inf. } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") statesDist <- c(0.8, 0.2) rctmc(n = Inf, ctmc = molecularCTMC, T = 1) rctmc(n = 5, ctmc = molecularCTMC, initDist = statesDist, include.T0 = FALSE) } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. 
Anderson, University of Wisconsin at Madison } \seealso{ \code{\link{generatorToTransitionMatrix}},\code{\link{ctmc-class}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/expectedRewards.Rd0000644000176200001440000000210715137702633016756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{expectedRewards} \alias{expectedRewards} \title{Expected Rewards for a markovchain} \usage{ expectedRewards(markovchain,n,rewards) } \arguments{ \item{markovchain}{the markovchain-class object} \item{n}{no of steps of the process} \item{rewards}{vector depicting rewards coressponding to states} } \value{ returns a vector of expected rewards for different initial states } \description{ Given a markovchain object and reward values for every state, function calculates expected reward value after n steps. } \details{ the function uses a dynamic programming approach to solve a recursive equation described in reference. } \examples{ transMatr<-matrix(c(0.99,0.01,0.01,0.99),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr) expectedRewards(simpleMc,1,c(0,1)) } \references{ Stochastic Processes: Theory for Applications, Robert G. Gallager, Cambridge University Press } \author{ Vandit Jain } markovchain/man/steadyStates.Rd0000644000176200001440000000241415137702633016303 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{steadyStates} \alias{steadyStates} \title{Stationary states of a \code{markovchain} object} \usage{ steadyStates(object) } \arguments{ \item{object}{A discrete \code{markovchain} object} } \value{ A matrix corresponding to the stationary states } \description{ This method returns the stationary vector in matricial form of a markovchain object. 
} \note{ The steady states are identified starting from which eigenvectors correspond to identity eigenvalues and then normalizing them to sum up to unity. When negative values are found in the matrix, the eigenvalues extraction is performed on the recurrent classes submatrix. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) steadyStates(markovB) } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/transition2Generator.Rd0000644000176200001440000000145215137702633017752 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{transition2Generator} \alias{transition2Generator} \title{Return the generator matrix for a corresponding transition matrix} \usage{ transition2Generator(P, t = 1, method = "logarithm") } \arguments{ \item{P}{transition matrix between time 0 and t} \item{t}{time of observation} \item{method}{"logarithm" returns the Matrix logarithm of the transition matrix} } \value{ A matrix that represent the generator of P } \description{ Calculate the generator matrix for a corresponding transition matrix } \examples{ mymatr <- matrix(c(.4, .6, .1, .9), nrow = 2, byrow = TRUE) Q <- transition2Generator(P = mymatr) expm::expm(Q) } \seealso{ \code{\link{rctmc}} } markovchain/man/kullback.Rd0000644000176200001440000000064615137702633015423 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{kullback} \alias{kullback} \title{Example from Kullback and Kupperman Tests for Contingency Tables} \format{ A list containing two 6x6 non - negative integer matrices } \usage{ data(kullback) } 
\description{ A list of two matrices representing raw transitions between two states } \keyword{datasets} markovchain/man/structuralAnalysis.Rd0000644000176200001440000000630315137702633017543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R, R/probabilistic.R \name{period} \alias{period} \alias{communicatingClasses} \alias{transientStates} \alias{recurrentStates} \alias{absorbingStates} \alias{transientClasses} \alias{recurrentClasses} \alias{canonicForm} \title{Various function to perform structural analysis of DTMC} \usage{ period(object) communicatingClasses(object) recurrentClasses(object) transientClasses(object) transientStates(object) recurrentStates(object) absorbingStates(object) canonicForm(object) } \arguments{ \item{object}{A \code{markovchain} object.} } \value{ \describe{ \item{\code{period}}{returns a integer number corresponding to the periodicity of the Markov chain (if it is irreducible)} \item{\code{absorbingStates}}{returns a character vector with the names of the absorbing states in the Markov chain} \item{\code{communicatingClasses}}{returns a list in which each slot contains the names of the states that are in that communicating class} \item{\code{recurrentClasses}}{analogously to \code{communicatingClasses}, but with recurrent classes} \item{\code{transientClasses}}{analogously to \code{communicatingClasses}, but with transient classes} \item{\code{transientStates}}{returns a character vector with all the transient states for the Markov chain} \item{\code{recurrentStates}}{returns a character vector with all the recurrent states for the Markov chain} \item{\code{canonicForm}}{returns the Markov chain reordered by a permutation of states so that we have blocks submatrices for each of the recurrent classes and a collection of rows in the end for the transient states} } } \description{ These functions return absorbing and transient states of the \code{markovchain} objects. 
} \examples{ statesNames <- c("a", "b", "c") mc <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames)) ) communicatingClasses(mc) recurrentClasses(mc) recurrentClasses(mc) absorbingStates(mc) transientStates(mc) recurrentStates(mc) canonicForm(mc) # periodicity analysis A <- matrix(c(0, 1, 0, 0, 0.5, 0, 0.5, 0, 0, 0.5, 0, 0.5, 0, 0, 1, 0), nrow = 4, ncol = 4, byrow = TRUE) mcA <- new("markovchain", states = c("a", "b", "c", "d"), transitionMatrix = A, name = "A") is.irreducible(mcA) #true period(mcA) #2 # periodicity analysis B <- matrix(c(0, 0, 1/2, 1/4, 1/4, 0, 0, 0, 0, 1/3, 0, 2/3, 0, 0, 0, 0, 0, 0, 0, 1/3, 2/3, 0, 0, 0, 0, 0, 1/2, 1/2, 0, 0, 0, 0, 0, 3/4, 1/4, 1/2, 1/2, 0, 0, 0, 0, 0, 1/4, 3/4, 0, 0, 0, 0, 0), byrow = TRUE, ncol = 7) mcB <- new("markovchain", transitionMatrix = B) period(mcB) } \references{ Feres, Matlab listing for markov chain. } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Alfredo Spedicato, Ignacio Cordón } markovchain/man/rmarkovchain.Rd0000644000176200001440000000547315137702633016322 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{rmarkovchain} \alias{rmarkovchain} \title{Function to generate a sequence of states from homogeneous or non-homogeneous Markov chains.} \usage{ rmarkovchain( n, object, what = "data.frame", useRCpp = TRUE, parallel = FALSE, num.cores = NULL, ... ) } \arguments{ \item{n}{Sample size} \item{object}{Either a \code{markovchain} or a \code{markovchainList} object} \item{what}{It specifies whether either a \code{data.frame} or a \code{matrix} (each rows represent a simulation) or a \code{list} is returned.} \item{useRCpp}{Boolean. Should RCpp fast implementation being used? Default is yes.} \item{parallel}{Boolean. Should parallel implementation being used? 
Default is yes.} \item{num.cores}{Number of Cores to be used} \item{...}{additional parameters passed to the internal sampler} } \value{ Character Vector, data.frame, list or matrix } \description{ Provided any \code{markovchain} or \code{markovchainList} objects, it returns a sequence of states coming from the underlying stationary distribution. } \details{ When a homogeneous process is assumed (\code{markovchain} object) a sequence is sampled of size n. When a non - homogeneous process is assumed, n samples are taken but the process is assumed to last from the begin to the end of the non-homogeneous markov process. } \note{ Check the type of input } \examples{ # define the markovchain object statesNames <- c("a", "b", "c") mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) # show the sequence outs <- rmarkovchain(n = 100, object = mcB, what = "list") #define markovchainList object statesNames <- c("a", "b", "c") mcA <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mcC <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 0.2, 0.8, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames))) mclist <- new("markovchainList", markovchains = list(mcA, mcB, mcC)) # show the list of sequence rmarkovchain(100, mclist, "list") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainFit}}, \code{\link{markovchainSequence}} } \author{ Giorgio Spedicato } 
markovchain/man/freq2Generator.Rd0000644000176200001440000000241115137702633016511 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{freq2Generator} \alias{freq2Generator} \title{Returns a generator matrix corresponding to frequency matrix} \usage{ freq2Generator(P, t = 1, method = "QO", logmethod = "Eigen") } \arguments{ \item{P}{relative frequency matrix} \item{t}{(default value = 1)} \item{method}{one among "QO"(Quasi optimaisation), "WA"(weighted adjustment), "DA"(diagonal adjustment)} \item{logmethod}{method for computation of matrx algorithm (by default : Eigen)} } \value{ returns a generator matix with same dimnames } \description{ The function provides interface to calculate generator matrix corresponding to a frequency matrix and time taken } \examples{ sample <- matrix(c(150,2,1,1,1,200,2,1,2,1,175,1,1,1,1,150),nrow = 4,byrow = TRUE) sample_rel = rbind((sample/rowSums(sample))[1:dim(sample)[1]-1,],c(rep(0,dim(sample)[1]-1),1)) freq2Generator(sample_rel,1) data(tm_abs) tm_rel=rbind((tm_abs/rowSums(tm_abs))[1:7,],c(rep(0,7),1)) ## Derive quasi optimization generator matrix estimate freq2Generator(tm_rel,1) } \references{ E. Kreinin and M. Sidelnikova: Regularization Algorithms for Transition Matrices. Algo Research Quarterly 4(1):23-40, 2001 } markovchain/man/meanFirstPassageTime.Rd0000644000176200001440000000354315137702633017705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanFirstPassageTime} \alias{meanFirstPassageTime} \title{Mean First Passage Time for irreducible Markov chains} \usage{ meanFirstPassageTime(object, destination) } \arguments{ \item{object}{the markovchain object} \item{destination}{a character vector representing the states respect to which we want to compute the mean first passage time. 
Empty by default} } \value{ a Matrix of the same size with the average first passage times if destination is empty, a vector if destination is not } \description{ Given an irreducible (ergodic) markovchain object, this function calculates the expected number of steps to reach other states } \details{ For an ergodic Markov chain it computes: \itemize{ \item If destination is empty, the average first time (in steps) that takes the Markov chain to go from initial state i to j. (i, j) represents that value in case the Markov chain is given row-wise, (j, i) in case it is given col-wise. \item If destination is not empty, the average time it takes us from the remaining states to reach the states in \code{destination} } } \examples{ m <- matrix(1 / 10 * c(6,3,1, 2,3,5, 4,1,5), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) meanFirstPassageTime(mc, "r") # Grinstead and Snell's "Oz weather" worked out example mOz <- matrix(c(2,1,1, 2,0,2, 1,1,2)/4, ncol = 3, byrow = TRUE) mcOz <- new("markovchain", states = c("s", "c", "r"), transitionMatrix = mOz) meanFirstPassageTime(mcOz) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. 
} \author{ Toni Giorgino, Ignacio Cordón } markovchain/man/meanNumVisits.Rd0000644000176200001440000000170115137702633016426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanNumVisits} \alias{meanNumVisits} \title{Mean num of visits for markovchain, starting at each state} \usage{ meanNumVisits(object) } \arguments{ \item{object}{the markovchain-class object} } \value{ a matrix with the expect number of visits to each state } \description{ Given a markovchain object, this function calculates a matrix where the element (i, j) represents the expect number of visits to the state j if the chain starts at i (in a Markov chain by columns it would be the element (j, i) instead) } \examples{ M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 mc <- new("markovchain", transitionMatrix = M) meanNumVisits(mc) } \references{ R. Vélez, T. Prieto, Procesos Estocásticos, Librería UNED, 2013 } \author{ Ignacio Cordón } markovchain/man/hittingProbabilities.Rd0000644000176200001440000000145315137702633020007 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{hittingProbabilities} \alias{hittingProbabilities} \title{Hitting probabilities for markovchain} \usage{ hittingProbabilities(object) } \arguments{ \item{object}{the markovchain-class object} } \value{ a matrix of hitting probabilities } \description{ Given a markovchain object, this function calculates the probability of ever arriving from state i to j } \examples{ M <- markovchain:::zeros(5) M[1,1] <- M[5,5] <- 1 M[2,1] <- M[2,3] <- 1/2 M[3,2] <- M[3,4] <- 1/2 M[4,2] <- M[4,5] <- 1/2 mc <- new("markovchain", transitionMatrix = M) hittingProbabilities(mc) } \references{ R. Vélez, T. 
Prieto, Procesos Estocásticos, Librería UNED, 2013 } \author{ Ignacio Cordón } markovchain/man/is.regular.Rd0000644000176200001440000000155015137702633015701 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.regular} \alias{is.regular} \title{Check if a DTMC is regular} \usage{ is.regular(object) } \arguments{ \item{object}{a markovchain object} } \value{ A boolean value } \description{ Function to check wether a DTCM is regular } \details{ A Markov chain is regular if some of the powers of its matrix has all elements strictly positive } \examples{ P <- matrix(c(0.5, 0.25, 0.25, 0.5, 0, 0.5, 0.25, 0.25, 0.5), nrow = 3) colnames(P) <- rownames(P) <- c("R","N","S") ciao <- as(P, "markovchain") is.regular(ciao) } \references{ Matrix Analysis. Roger A.Horn, Charles R.Johnson. 2nd edition. Corollary 8.5.8, Theorem 8.5.9 } \seealso{ \code{\link{is.irreducible}} } \author{ Ignacio Cordón } markovchain/man/transitionProbability.Rd0000644000176200001440000000227315137702633020224 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{transitionProbability} \alias{transitionProbability} \alias{transitionProbability,markovchain-method} \title{Function to get the transition probabilities from initial to subsequent states.} \usage{ transitionProbability(object, t0, t1) \S4method{transitionProbability}{markovchain}(object, t0, t1) } \arguments{ \item{object}{A \code{markovchain} object.} \item{t0}{Initial state.} \item{t1}{Subsequent state.} } \value{ Numeric Vector } \description{ This is a convenience function to get transition probabilities. 
} \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) transitionProbability(markovB,"b", "c") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } markovchain/man/markovchainListFit.Rd0000644000176200001440000000240315137702633017425 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{markovchainListFit} \alias{markovchainListFit} \title{markovchainListFit} \usage{ markovchainListFit(data, byrow = TRUE, laplacian = 0, name) } \arguments{ \item{data}{Either a matrix or a data.frame or a list object.} \item{byrow}{Indicates whether distinc stochastic processes trajectiories are shown in distinct rows.} \item{laplacian}{Laplacian correction (default 0).} \item{name}{Optional name.} } \value{ A list containing two slots: estimate (the estimate) name } \description{ Given a data frame or a matrix (rows are observations, by cols the temporal sequence), it fits a non - homogeneous discrete time markov chain process (storing row). In particular a markovchainList of size = ncol - 1 is obtained estimating transitions from the n samples given by consecutive column pairs. } \details{ If \code{data} contains \code{NAs} then the transitions containing \code{NA} will be ignored. 
} \examples{ # using holson dataset data(holson) # fitting a single markovchain singleMc <- markovchainFit(data = holson[,2:12]) # fitting a markovchainList mclistFit <- markovchainListFit(data = holson[, 2:12], name = "holsonMcList") } markovchain/man/hommc-show.Rd0000644000176200001440000000064115137702633015707 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{show,hommc-method} \alias{show,hommc-method} \title{Function to display the details of hommc object} \usage{ \S4method{show}{hommc}(object) } \arguments{ \item{object}{An object of class hommc} } \description{ This is a convenience function to display the slots of hommc object in proper format } markovchain/man/getName.Rd0000644000176200001440000000142415137702633015206 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \name{name} \alias{name} \alias{name,markovchain-method} \title{Method to retrieve name of markovchain object} \usage{ name(object) \S4method{name}{markovchain}(object) } \arguments{ \item{object}{A markovchain object} } \description{ This method returns the name of a markovchain object } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames=list(statesNames,statesNames)), name = "A markovchain Object" ) name(markovB) } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/ctmcFit.Rd0000644000176200001440000000257515137702633015227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{ctmcFit} \alias{ctmcFit} \title{Function to fit a CTMC} \usage{ ctmcFit(data, byrow = TRUE, name = "", confidencelevel = 0.95) } \arguments{ \item{data}{It is a list of two elements. The first element is a character vector denoting the states. 
The second is a numeric vector denoting the corresponding transition times.} \item{byrow}{Determines if the output transition probabilities of the underlying embedded DTMC are by row.} \item{name}{Optional name for the CTMC.} \item{confidencelevel}{Confidence level for the confidence interval construnction.} } \value{ It returns a list containing the CTMC object and the confidence intervals. } \description{ This function fits the underlying CTMC give the state transition data and the transition times using the maximum likelihood method (MLE) } \details{ Note that in data, there must exist an element wise corresponding between the two elements of the list and that data[[2]][1] is always 0. } \examples{ data <- list(c("a", "b", "c", "a", "b", "a", "c", "b", "c"), c(0, 0.8, 2.1, 2.4, 4, 5, 5.9, 8.2, 9)) ctmcFit(data) } \references{ Continuous Time Markov Chains (vignette), Sai Bhargav Yalamanchi, Giorgio Alfredo Spedicato 2015 } \seealso{ \code{\link{rctmc}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/tm_abs.Rd0000644000176200001440000000131115137702633015066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{tm_abs} \alias{tm_abs} \title{Single Year Corporate Credit Rating Transititions} \format{ The format is: num [1:8, 1:8] 17 2 0 0 0 0 0 0 1 455 ... - attr(*, "dimnames")=List of 2 ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... ..$ : chr [1:8] "AAA" "AA" "A" "BBB" ... 
} \usage{ data(tm_abs) } \description{ Matrix of Standard and Poor's Global Corporate Rating Transition Frequencies 2000 (NR Removed) } \examples{ data(tm_abs) } \references{ European Securities and Markets Authority, 2016 https://cerep.esma.europa.eu/cerep-web/statistics/transitionMatrice.xhtml } \keyword{datasets} markovchain/man/meanRecurrenceTime.Rd0000644000176200001440000000215015137702633017400 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanRecurrenceTime} \alias{meanRecurrenceTime} \title{Mean recurrence time} \usage{ meanRecurrenceTime(object) } \arguments{ \item{object}{the markovchain object} } \value{ For a Markov chain it outputs is a named vector with the expected time to first return to a state when the chain starts there. States present in the vector are only the recurrent ones. If the matrix is ergodic (i.e. irreducible), then all states are present in the output and order is the same as states order for the Markov chain } \description{ Computes the expected time to return to a recurrent state in case the Markov chain starts there } \examples{ m <- matrix(1 / 10 * c(6,3,1, 2,3,5, 4,1,5), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = c("s","c","r"), transitionMatrix = m) meanRecurrenceTime(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. 
} \author{ Ignacio Cordón } markovchain/man/is.TimeReversible.Rd0000644000176200001440000000201415137702633017155 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{is.TimeReversible} \alias{is.TimeReversible} \title{checks if ctmc object is time reversible} \usage{ is.TimeReversible(ctmc) } \arguments{ \item{ctmc}{a ctmc-class object} } \value{ Returns a boolean value stating whether ctmc object is time reversible a boolean value as described above } \description{ The function returns checks if provided function is time reversible } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") is.TimeReversible(molecularCTMC) } \references{ INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. DOBROW, Wiley } \author{ Vandit Jain } markovchain/man/multinomialConfidenceIntervals.Rd0000644000176200001440000000237315137702633022032 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fittingFunctions.R \name{multinomialConfidenceIntervals} \alias{multinomialConfidenceIntervals} \title{A function to compute multinomial confidence intervals of DTMC} \usage{ multinomialConfidenceIntervals( transitionMatrix, countsTransitionMatrix, confidencelevel = 0.95 ) } \arguments{ \item{transitionMatrix}{An estimated transition matrix.} \item{countsTransitionMatrix}{Empirical (conts) transition matrix, on which the \code{transitionMatrix} was performed.} \item{confidencelevel}{confidence interval level.} } \value{ Two matrices containing the confidence intervals. 
} \description{ Return estimated transition matrix assuming a Multinomial Distribution } \examples{ seq<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcfit<-markovchainFit(data=seq,byrow=TRUE) seqmat<-createSequenceMatrix(seq) multinomialConfidenceIntervals(mcfit$estimate@transitionMatrix, seqmat, 0.95) } \references{ Constructing two-sided simultaneous confidence intervals for multinomial proportions for small counts in a large number of cells. Journal of Statistical Software 5(6) (2000) } \seealso{ \code{markovchainFit} } markovchain/man/ExpectedTime.Rd0000644000176200001440000000206615137702633016211 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{ExpectedTime} \alias{ExpectedTime} \title{Returns expected hitting time from state i to state j} \usage{ ExpectedTime(C,i,j,useRCpp) } \arguments{ \item{C}{A CTMC S4 object} \item{i}{Initial state i} \item{j}{Final state j} \item{useRCpp}{logical whether to use Rcpp} } \value{ A numerical value that returns expected hitting times from i to j } \description{ Returns expected hitting time from state i to state j } \details{ According to the theorem, holding times for all states except j should be greater than 0. } \examples{ states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") ExpectedTime(ctmc,1,4,TRUE) } \references{ Markovchains, J. R. 
Norris, Cambridge University Press } \author{ Vandit Jain } markovchain/man/markovchain-class.Rd0000644000176200001440000002105515137702633017235 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \docType{class} \name{markovchain-class} \alias{markovchain-class} \alias{*,markovchain,markovchain-method} \alias{*,markovchain,matrix-method} \alias{*,markovchain,numeric-method} \alias{*,matrix,markovchain-method} \alias{*,numeric,markovchain-method} \alias{==,markovchain,markovchain-method} \alias{!=,markovchain,markovchain-method} \alias{absorbingStates,markovchain-method} \alias{transientStates,markovchain-method} \alias{recurrentStates,markovchain-method} \alias{transientClasses,markovchain-method} \alias{recurrentClasses,markovchain-method} \alias{communicatingClasses,markovchain-method} \alias{steadyStates,markovchain-method} \alias{meanNumVisits,markovchain-method} \alias{is.regular,markovchain-method} \alias{is.irreducible,markovchain-method} \alias{is.accessible,markovchain,character,character-method} \alias{is.accessible,markovchain,missing,missing-method} \alias{absorptionProbabilities,markovchain-method} \alias{meanFirstPassageTime,markovchain,character-method} \alias{meanFirstPassageTime,markovchain,missing-method} \alias{meanAbsorptionTime,markovchain-method} \alias{meanRecurrenceTime,markovchain-method} \alias{conditionalDistribution,markovchain-method} \alias{hittingProbabilities,markovchain-method} \alias{canonicForm,markovchain-method} \alias{coerce,data.frame,markovchain-method} \alias{coerce,markovchain,data.frame-method} \alias{coerce,table,markovchain-method} \alias{coerce,markovchain,igraph-method} \alias{coerce,markovchain,matrix-method} \alias{coerce,markovchain,sparseMatrix-method} \alias{coerce,sparseMatrix,markovchain-method} \alias{coerce,matrix,markovchain-method} \alias{coerce,Matrix,markovchain-method} \alias{coerce,msm,markovchain-method} 
\alias{coerce,msm.est,markovchain-method} \alias{coerce,etm,markovchain-method} \alias{dim,markovchain-method} \alias{initialize,markovchain-method} \alias{names<-,markovchain-method} \alias{plot,markovchain,missing-method} \alias{predict,markovchain-method} \alias{print,markovchain-method} \alias{show,markovchain-method} \alias{summary,markovchain-method} \alias{sort,markovchain-method} \alias{t,markovchain-method} \alias{[,markovchain,ANY,ANY,ANY-method} \alias{^,markovchain,numeric-method} \title{Markov Chain class} \arguments{ \item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the transition matrix} \item{byrow}{TRUE or FALSE indicating whether the supplied matrix is either stochastic by rows or by columns} \item{transitionMatrix}{Square transition matrix} \item{name}{Optional character name of the Markov chain} } \description{ The S4 class that describes \code{markovchain} objects. } \note{ \enumerate{ \item \code{markovchain} object are backed by S4 Classes. \item Validation method is used to assess whether either columns or rows totals to one. Rounding is used up to \code{.Machine$double.eps * 100}. If state names are not properly defined for a probability \code{matrix}, coercing to \code{markovchain} object leads to overriding states name with artificial "s1", "s2", ... sequence. In addition, operator overloading has been applied for \eqn{+,*,^,==,!=} operators. } } \section{Creation of objects}{ Objects can be created by calls of the form \code{new("markovchain", states, byrow, transitionMatrix, ...)}. 
} \section{Methods}{ \describe{ \item{*}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: multiply two \code{markovchain} objects} \item{*}{\code{signature(e1 = "markovchain", e2 = "matrix")}: markovchain by matrix multiplication} \item{*}{\code{signature(e1 = "markovchain", e2 = "numeric")}: markovchain by numeric vector multiplication } \item{*}{\code{signature(e1 = "matrix", e2 = "markovchain")}: matrix by markov chain} \item{*}{\code{signature(e1 = "numeric", e2 = "markovchain")}: numeric vector by \code{markovchain} multiplication } \item{[}{\code{signature(x = "markovchain", i = "ANY", j = "ANY", drop = "ANY")}: ... } \item{^}{\code{signature(e1 = "markovchain", e2 = "numeric")}: power of a \code{markovchain} object} \item{==}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: equality of two \code{markovchain} object} \item{!=}{\code{signature(e1 = "markovchain", e2 = "markovchain")}: non-equality of two \code{markovchain} object} \item{absorbingStates}{\code{signature(object = "markovchain")}: method to get absorbing states } \item{canonicForm}{\code{signature(object = "markovchain")}: return a \code{markovchain} object into canonic form } \item{coerce}{\code{signature(from = "markovchain", to = "data.frame")}: coerce method from markovchain to \code{data.frame}} \item{conditionalDistribution}{\code{signature(object = "markovchain")}: returns the conditional probability of subsequent states given a state} \item{coerce}{\code{signature(from = "data.frame", to = "markovchain")}: coerce method from \code{data.frame} to \code{markovchain}} \item{coerce}{\code{signature(from = "table", to = "markovchain")}: coerce method from \code{table} to \code{markovchain} } \item{coerce}{\code{signature(from = "msm", to = "markovchain")}: coerce method from \code{msm} to \code{markovchain} } \item{coerce}{\code{signature(from = "msm.est", to = "markovchain")}: coerce method from \code{msm.est} (but only from a Probability Matrix) to \code{markovchain} } 
\item{coerce}{\code{signature(from = "etm", to = "markovchain")}: coerce method from \code{etm} to \code{markovchain} } \item{coerce}{\code{signature(from = "sparseMatrix", to = "markovchain")}: coerce method from \code{sparseMatrix} to \code{markovchain} } \item{coerce}{\code{signature(from = "markovchain", to = "igraph")}: coercing to \code{igraph} objects } \item{coerce}{\code{signature(from = "markovchain", to = "matrix")}: coercing to \code{matrix} objects } \item{coerce}{\code{signature(from = "markovchain", to = "sparseMatrix")}: coercing to \code{sparseMatrix} objects } \item{coerce}{\code{signature(from = "matrix", to = "markovchain")}: coercing to \code{markovchain} objects from \code{matrix} one } \item{dim}{\code{signature(x = "markovchain")}: method to get the size} \item{names}{\code{signature(x = "markovchain")}: method to get the names of states} \item{names<-}{\code{signature(x = "markovchain", value = "character")}: method to set the names of states} \item{initialize}{\code{signature(.Object = "markovchain")}: initialize method } \item{plot}{\code{signature(x = "markovchain", y = "missing")}: plot method for \code{markovchain} objects } \item{predict}{\code{signature(object = "markovchain")}: predict method } \item{print}{\code{signature(x = "markovchain")}: print method. } \item{show}{\code{signature(object = "markovchain")}: show method. } \item{sort}{\code{signature(x = "markovchain", decreasing=FALSE)}: sorting the transition matrix. } \item{states}{\code{signature(object = "markovchain")}: returns the names of states (as \code{names}. } \item{steadyStates}{\code{signature(object = "markovchain")}: method to get the steady vector. } \item{summary}{\code{signature(object = "markovchain")}: method to summarize structure of the markov chain } \item{transientStates}{\code{signature(object = "markovchain")}: method to get the transient states. 
} \item{t}{\code{signature(x = "markovchain")}: transpose matrix } \item{transitionProbability}{\code{signature(object = "markovchain")}: transition probability } } } \examples{ #show markovchain definition showClass("markovchain") #create a simple Markov chain transMatr<-matrix(c(0.4,0.6,.3,.7),nrow=2,byrow=TRUE) simpleMc<-new("markovchain", states=c("a","b"), transitionMatrix=transMatr, name="simpleMc") #power simpleMc^4 #some methods steadyStates(simpleMc) absorbingStates(simpleMc) simpleMc[2,1] t(simpleMc) is.irreducible(simpleMc) #conditional distributions conditionalDistribution(simpleMc, "b") #example for predict method sequence<-c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") mcFit<-markovchainFit(data=sequence) predict(mcFit$estimate, newdata="b",n.ahead=3) #direct conversion myMc<-as(transMatr, "markovchain") #example of summary summary(simpleMc) \dontrun{plot(simpleMc)} } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\link{markovchainSequence}},\code{\link{markovchainFit}} } \author{ Giorgio Spedicato } \keyword{classes} markovchain/man/meanAbsorptionTime.Rd0000644000176200001440000000174015137702633017427 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{meanAbsorptionTime} \alias{meanAbsorptionTime} \title{Mean absorption time} \usage{ meanAbsorptionTime(object) } \arguments{ \item{object}{the markovchain object} } \value{ A named vector with the expected number of steps to go from a transient state to any of the recurrent ones } \description{ Computes the expected number of steps to go from any of the transient states to any of the recurrent states. 
The Markov chain should have at least one transient state for this method to work } \examples{ m <- matrix(c(1/2, 1/2, 0, 1/2, 1/2, 0, 0, 1/2, 1/2), ncol = 3, byrow = TRUE) mc <- new("markovchain", states = letters[1:3], transitionMatrix = m) times <- meanAbsorptionTime(mc) } \references{ C. M. Grinstead and J. L. Snell. Introduction to Probability. American Mathematical Soc., 2012. } \author{ Ignacio Cordón } markovchain/man/markovchainFit.Rd0000644000176200001440000001116015137702633016571 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{createSequenceMatrix} \alias{createSequenceMatrix} \alias{markovchainFit} \title{Function to fit a discrete Markov chain} \usage{ createSequenceMatrix( stringchar, toRowProbs = FALSE, sanitize = FALSE, possibleStates = character() ) markovchainFit( data, method = "mle", byrow = TRUE, nboot = 10L, laplacian = 0, name = "", parallel = FALSE, confidencelevel = 0.95, confint = TRUE, hyperparam = matrix(), sanitize = FALSE, possibleStates = character() ) } \arguments{ \item{stringchar}{It can be a \deqn{n x n} matrix or a character vector or a list} \item{toRowProbs}{converts a sequence matrix into a probability matrix} \item{sanitize}{put 1 in all rows having rowSum equal to zero} \item{possibleStates}{Possible states which are not present in the given sequence} \item{data}{It can be a character vector or a \deqn{n x n} matrix or a \deqn{n x n} data frame or a list} \item{method}{Method used to estimate the Markov chain. Either "mle", "map", "bootstrap" or "laplace"} \item{byrow}{it tells whether the output Markov chain should show the transition probabilities by row.} \item{nboot}{Number of bootstrap replicates in case "bootstrap" is used.} \item{laplacian}{Laplacian smoothing parameter, default zero. 
It is only used when "laplace" method is chosen.} \item{name}{Optional character for name slot.} \item{parallel}{Use parallel processing when performing Boostrap estimates.} \item{confidencelevel}{\deqn{\alpha} level for conficence intervals width. Used only when \code{method} equal to "mle".} \item{confint}{a boolean to decide whether to compute Confidence Interval or not.} \item{hyperparam}{Hyperparameter matrix for the a priori distribution. If none is provided, default value of 1 is assigned to each parameter. This must be of size \deqn{k x k} where k is the number of states in the chain and the values should typically be non-negative integers.} } \value{ A list containing an estimate, log-likelihood, and, when "bootstrap" method is used, a matrix of standards deviations and the bootstrap samples. When the "mle", "bootstrap" or "map" method is used, the lower and upper confidence bounds are returned along with the standard error. The "map" method also returns the expected value of the parameters with respect to the posterior distribution. } \description{ Given a sequence of states arising from a stationary state, it fits the underlying Markov chain distribution using either MLE (also using a Laplacian smoother), bootstrap or by MAP (Bayesian) inference. } \details{ Disabling confint would lower the computation time on large datasets. If \code{data} or \code{stringchar} contain \code{NAs}, the related \code{NA} containing transitions will be ignored. } \note{ This function has been rewritten in Rcpp. Bootstrap algorithm has been defined "heuristically". In addition, parallel facility is not complete, involving only a part of the bootstrap process. When \code{data} is either a \code{data.frame} or a \code{matrix} object, only MLE fit is currently available. 
} \examples{ sequence <- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") sequenceMatr <- createSequenceMatrix(sequence, sanitize = FALSE) mcFitMLE <- markovchainFit(data = sequence) mcFitBSP <- markovchainFit(data = sequence, method = "bootstrap", nboot = 5, name = "Bootstrap Mc") na.sequence <- c("a", NA, "a", "b") # There will be only a (a,b) transition na.sequenceMatr <- createSequenceMatrix(na.sequence, sanitize = FALSE) mcFitMLE <- markovchainFit(data = na.sequence) # data can be a list of character vectors sequences <- list(x = c("a", "b", "a"), y = c("b", "a", "b", "a", "c")) mcFitMap <- markovchainFit(sequences, method = "map") mcFitMle <- markovchainFit(sequences, method = "mle") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-Class Modeling, Christopher C. Strelioff, James P. Crutchfield, Alfred Hubler, Santa Fe Institute Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. 
R package version 0.2.5 } \seealso{ \code{\link{markovchainSequence}}, \code{\link{markovchainListFit}} } \author{ Giorgio Spedicato, Tae Seung Kang, Sai Bhargav Yalamanchi } markovchain/man/expectedRewardsBeforeHittingA.Rd0000644000176200001440000000203015137702633021524 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{expectedRewardsBeforeHittingA} \alias{expectedRewardsBeforeHittingA} \title{Expected first passage Rewards for a set of states in a markovchain} \usage{ expectedRewardsBeforeHittingA(markovchain, A, state, rewards, n) } \arguments{ \item{markovchain}{the markovchain-class object} \item{A}{set of states for first passage expected reward} \item{state}{initial state} \item{rewards}{vector depicting rewards coressponding to states} \item{n}{no of steps of the process} } \value{ returns a expected reward (numerical value) as described above } \description{ Given a markovchain object and reward values for every state, function calculates expected reward value for a set A of states after n steps. } \details{ The function returns the value of expected first passage rewards given rewards coressponding to every state, an initial state and number of steps. } \author{ Sai Bhargav Yalamanchi, Vandit Jain } markovchain/man/ctmc-class.Rd0000644000176200001440000000455515137702633015667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcClassesAndMethods.R \docType{class} \name{ctmc-class} \alias{ctmc-class} \alias{dim,ctmc-method} \alias{initialize,ctmc_method} \alias{states,ctmc-method} \alias{steadyStates,ctmc-method} \alias{plot,ctmc,missing-method} \title{Continuous time Markov Chains class} \arguments{ \item{states}{Name of the states. Must be the same of \code{colnames} and \code{rownames} of the generator matrix} \item{byrow}{TRUE or FALSE. 
Indicates whether the given matrix is stochastic by rows or by columns} \item{generator}{Square generator matrix} \item{name}{Optional character name of the Markov chain} } \description{ The S4 class that describes \code{ctmc} (continuous time Markov chain) objects. } \note{ \enumerate{ \item \code{ctmc} classes are written using S4 classes \item Validation method is used to assess whether either columns or rows totals to zero. Rounding is used up to 5th decimal. If state names are not properly defined for a generator \code{matrix}, coercing to \code{ctmc} object leads to overriding states name with artificial "s1", "s2", ... sequence } } \section{Methods}{ \describe{ \item{dim}{\code{signature(x = "ctmc")}: method to get the size} \item{initialize}{\code{signature(.Object = "ctmc")}: initialize method } \item{states}{\code{signature(object = "ctmc")}: states method. } \item{steadyStates}{\code{signature(object = "ctmc")}: method to get the steady state vector. } \item{plot}{\code{signature(x = "ctmc", y = "missing")}: plot method for \code{ctmc} objects } } } \examples{ energyStates <- c("sigma", "sigma_star") byRow <- TRUE gen <- matrix(data = c(-3, 3, 1, -1), nrow = 2, byrow = byRow, dimnames = list(energyStates, energyStates)) molecularCTMC <- new("ctmc", states = energyStates, byrow = byRow, generator = gen, name = "Molecular Transition Model") steadyStates(molecularCTMC) \dontrun{plot(molecularCTMC)} } \references{ Introduction to Stochastic Processes with Applications in the Biosciences (2013), David F. Anderson, University of Wisconsin at Madison. 
Sai Bhargav Yalamanchi, Giorgio Spedicato } \seealso{ \code{\link{generatorToTransitionMatrix}},\code{\link{rctmc}} } \keyword{classes} markovchain/man/firstPassageMultiple.Rd0000644000176200001440000000236715137702633020004 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{firstPassageMultiple} \alias{firstPassageMultiple} \title{function to calculate first passage probabilities} \usage{ firstPassageMultiple(object, state, set, n) } \arguments{ \item{object}{a markovchain-class object} \item{state}{intital state of the process (charactervector)} \item{set}{set of states A, first passage of which is to be calculated} \item{n}{Number of rows on which compute the distribution} } \value{ A vector of size n showing the first time proabilities } \description{ The function calculates first passage probability for a subset of states given an initial state. } \examples{ statesNames <- c("a", "b", "c") markovB <- new("markovchain", states = statesNames, transitionMatrix = matrix(c(0.2, 0.5, 0.3, 0, 1, 0, 0.1, 0.8, 0.1), nrow = 3, byrow = TRUE, dimnames = list(statesNames, statesNames) )) firstPassageMultiple(markovB,"a",c("b","c"),4) } \references{ Renaldo Feres, Notes for Math 450 Matlab listings for Markov chains; MIT OCW, course - 6.262, Discrete Stochastic Processes, course-notes, chap -05 } \seealso{ \code{\link{firstPassage}} } \author{ Vandit Jain } markovchain/man/ones.Rd0000644000176200001440000000047215137702633014574 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/matlab_package_functions.R \name{ones} \alias{ones} \title{Returns an Identity matrix} \usage{ ones(n) } \arguments{ \item{n}{size of the matrix} } \value{ a identity matrix } \description{ Returns an Identity matrix } markovchain/man/zeros.Rd0000644000176200001440000000047415137702633014774 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in 
R/matlab_package_functions.R \name{zeros} \alias{zeros} \title{Matrix to create zeros} \usage{ zeros(n) } \arguments{ \item{n}{size of the matrix} } \value{ a square matrix of zeros } \description{ Matrix to create zeros } markovchain/man/inferHyperparam.Rd0000644000176200001440000000440315137702633016762 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{inferHyperparam} \alias{inferHyperparam} \title{Function to infer the hyperparameters for Bayesian inference from an a priori matrix or a data set} \usage{ inferHyperparam(transMatr = matrix(), scale = numeric(), data = character()) } \arguments{ \item{transMatr}{A valid transition matrix, with dimension names.} \item{scale}{A vector of scaling factors, each element corresponds to the row names of the provided transition matrix transMatr, in the same order.} \item{data}{A data set from which the hyperparameters are inferred.} } \value{ Returns the hyperparameter matrix in a list. } \description{ Since the Bayesian inference approach implemented in the package is based on conjugate priors, hyperparameters must be provided to model the prior probability distribution of the chain parameters. The hyperparameters are inferred from a given a priori matrix under the assumption that the matrix provided corresponds to the mean (expected) values of the chain parameters. A scaling factor vector must be provided too. Alternatively, the hyperparameters can be inferred from a data set. } \details{ transMatr and scale need not be provided if data is provided. } \note{ The hyperparameter matrix returned is such that the row and column names are sorted alphanumerically, and the elements in the matrix are correspondingly permuted. 
} \examples{ data(rain, package = "markovchain") inferHyperparam(data = rain$rain) weatherStates <- c("sunny", "cloudy", "rain") weatherMatrix <- matrix(data = c(0.7, 0.2, 0.1, 0.3, 0.4, 0.3, 0.2, 0.4, 0.4), byrow = TRUE, nrow = 3, dimnames = list(weatherStates, weatherStates)) inferHyperparam(transMatr = weatherMatrix, scale = c(10, 10, 10)) } \references{ Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainFit}}, \code{\link{predictiveDistribution}} } \author{ Sai Bhargav Yalamanchi, Giorgio Spedicato } markovchain/man/probabilityatT.Rd0000644000176200001440000000243615137702633016623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcProbabilistic.R \name{probabilityatT} \alias{probabilityatT} \title{Calculating probability from a ctmc object} \usage{ probabilityatT(C,t,x0,useRCpp) } \arguments{ \item{C}{A CTMC S4 object} \item{t}{final time t} \item{x0}{initial state} \item{useRCpp}{logical whether to use RCpp implementation} } \value{ returns a vector or a matrix in case \code{x0} is provided or not respectively. } \description{ This function returns the probability of every state at time t under different conditions } \details{ The initial state is not mandatory, In case it is not provided, function returns a matrix of transition function at time \code{t} else it returns vector of probaabilities of transition to different states if initial state was \code{x0} } \examples{ states <- c("a","b","c","d") byRow <- TRUE gen <- matrix(data = c(-1, 1/2, 1/2, 0, 1/4, -1/2, 0, 1/4, 1/6, 0, -1/3, 1/6, 0, 0, 0, 0), nrow = 4,byrow = byRow, dimnames = list(states,states)) ctmc <- new("ctmc",states = states, byrow = byRow, generator = gen, name = "testctmc") probabilityatT(ctmc,1,useRCpp = TRUE) } \references{ INTRODUCTION TO STOCHASTIC PROCESSES WITH R, ROBERT P. 
DOBROW, Wiley } \author{ Vandit Jain } markovchain/man/predictiveDistribution.Rd0000644000176200001440000000406715137702633020372 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{predictiveDistribution} \alias{predictiveDistribution} \title{predictiveDistribution} \usage{ predictiveDistribution(stringchar, newData, hyperparam = matrix()) } \arguments{ \item{stringchar}{This is the data using which the Bayesian inference is performed.} \item{newData}{This is the data whose predictive probability is computed.} \item{hyperparam}{This determines the shape of the prior distribution of the parameters. If none is provided, default value of 1 is assigned to each parameter. This must be of size kxk where k is the number of states in the chain and the values should typically be non-negative integers.} } \value{ The log of the probability is returned. } \description{ The function computes the probability of observing a new data set, given a data set } \details{ The underlying method is Bayesian inference. The probability is computed by averaging the likelihood of the new data with respect to the posterior. Since the method assumes conjugate priors, the result can be represented in a closed form (see the vignette for more details), which is what is returned. } \examples{ sequence<- c("a", "b", "a", "a", "a", "a", "b", "a", "b", "a", "b", "a", "a", "b", "b", "b", "a") hyperMatrix<-matrix(c(1, 2, 1, 4), nrow = 2,dimnames=list(c("a","b"),c("a","b"))) predProb <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix ) hyperMatrix2<-hyperMatrix[c(2,1),c(2,1)] predProb2 <- predictiveDistribution(sequence[1:10], sequence[11:17], hyperparam =hyperMatrix2 ) predProb2==predProb } \references{ Inferring Markov Chains: Bayesian Estimation, Model Comparison, Entropy Rate, and Out-of-Class Modeling, Christopher C. Strelioff, James P. 
Crutchfield, Alfred Hubler, Santa Fe Institute Yalamanchi SB, Spedicato GA (2015). Bayesian Inference of First Order Markov Chains. R package version 0.2.5 } \seealso{ \code{\link{markovchainFit}} } \author{ Sai Bhargav Yalamanchi } markovchain/man/markovchainList-class.Rd0000644000176200001440000000516715137702633020077 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/classesAndMethods.R \docType{class} \name{markovchainList-class} \alias{markovchainList-class} \alias{[[,markovchainList-method} \alias{dim,markovchainList-method} \alias{predict,markovchainList-method} \alias{print,markovchainList-method} \alias{show,markovchainList-method} \title{Non homogeneous discrete time Markov Chains class} \arguments{ \item{markovchains}{Object of class \code{"list"}: a list of markovchains} \item{name}{Object of class \code{"character"}: optional name of the class} } \description{ A class to handle non homogeneous discrete Markov chains } \note{ The class consists of a list of \code{markovchain} objects. It is aimed at working with non homogeneous Markov chains. } \section{Objects from the Class}{ A \code{markovchainList} is a list of \code{markovchain} objects. They can be used to model non homogeneous discrete time Markov Chains, when transition probabilities (and possible states) change over time. 
} \section{Methods}{ \describe{ \item{[[}{\code{signature(x = "markovchainList")}: extract the i-th \code{markovchain} } \item{dim}{\code{signature(x = "markovchainList")}: number of \code{markovchain} underlying the matrix } \item{predict}{\code{signature(object = "markovchainList")}: predict from a \code{markovchainList} } \item{print}{\code{signature(x = "markovchainList")}: prints the list of markovchains } \item{show}{\code{signature(object = "markovchainList")}: same as \code{print} } } } \examples{ showClass("markovchainList") #define a markovchainList statesNames=c("a","b") mcA<-new("markovchain",name="MCA", transitionMatrix=matrix(c(0.7,0.3,0.1,0.9), byrow=TRUE, nrow=2, dimnames=list(statesNames,statesNames)) ) mcB<-new("markovchain", states=c("a","b","c"), name="MCB", transitionMatrix=matrix(c(0.2,0.5,0.3,0,1,0,0.1,0.8,0.1), nrow=3, byrow=TRUE)) mcC<-new("markovchain", states=c("a","b","c","d"), name="MCC", transitionMatrix=matrix(c(0.25,0.75,0,0,0.4,0.6, 0,0,0,0,0.1,0.9,0,0,0.7,0.3), nrow=4, byrow=TRUE) ) mcList<-new("markovchainList",markovchains=list(mcA, mcB, mcC), name="Non - homogeneous Markov Chain") } \references{ A First Course in Probability (8th Edition), Sheldon Ross, Prentice Hall 2010 } \seealso{ \code{\linkS4class{markovchain}} } \author{ Giorgio Spedicato } \keyword{classes} markovchain/man/ictmc-class.Rd0000644000176200001440000000122115137702633016023 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ctmcClassesAndMethods.R \docType{class} \name{ictmc-class} \alias{ictmc-class} \alias{ictmc} \title{An S4 class for representing Imprecise Continuous Time Markovchains} \description{ An S4 class for representing Imprecise Continuous Time Markovchains } \section{Slots}{ \describe{ \item{\code{states}}{a vector of states present in the ICTMC model} \item{\code{Q}}{matrix representing the generator demonstrated in the form of variables} \item{\code{range}}{a matrix that stores values of range of 
variables} \item{\code{name}}{name given to ICTMC} }} markovchain/man/fitHighOrderMultivarMC.Rd0000644000176200001440000000205415137702633020150 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \name{fitHighOrderMultivarMC} \alias{fitHighOrderMultivarMC} \title{Function to fit Higher Order Multivariate Markov chain} \usage{ fitHighOrderMultivarMC(seqMat, order = 2, Norm = 2) } \arguments{ \item{seqMat}{a matrix or a data frame where each column is a categorical sequence} \item{order}{Multivariate Markov chain order. Default is 2.} \item{Norm}{Norm to be used. Default is 2.} } \value{ an hommc object } \description{ Given a matrix of categorical sequences it fits Higher Order Multivariate Markov chain. } \examples{ data <- matrix(c('2', '1', '3', '3', '4', '3', '2', '1', '3', '3', '2', '1', c('2', '4', '4', '4', '4', '2', '3', '3', '1', '4', '3', '3')), ncol = 2, byrow = FALSE) fitHighOrderMultivarMC(data, order = 2, Norm = 2) } \references{ W.-K. Ching et al. / Linear Algebra and its Applications } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/blanden.Rd0000644000176200001440000000150715137702633015233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{blanden} \alias{blanden} \title{Mobility between income quartiles} \format{ An object of class \code{table} with 4 rows and 4 columns. } \source{ Personal reworking } \usage{ data(blanden) } \description{ This table shows mobility between income quartiles for fathers and sons for the 1970 birth cohort } \details{ The rows represent fathers' income quartile when the son is aged 16, whilst the columns represent sons' income quartiles when he is aged 30 (in 2000). 
} \examples{ data(blanden) mobilityMc<-as(blanden, "markovchain") } \references{ Jo Blanden, Paul Gregg and Stephen Machin, Intergenerational Mobility in Europe and North America, Center for Economic Performances (2005) } \keyword{datasets} markovchain/man/hommc-class.Rd0000644000176200001440000000254415137702633016040 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hommc.R \docType{class} \name{hommc-class} \alias{hommc-class} \alias{hommc} \title{An S4 class for representing High Order Multivariate Markovchain (HOMMC)} \description{ An S4 class for representing High Order Multivariate Markovchain (HOMMC) } \section{Slots}{ \describe{ \item{\code{order}}{an integer equal to order of Multivariate Markovchain} \item{\code{states}}{a vector of states present in the HOMMC model} \item{\code{P}}{array of transition matrices} \item{\code{Lambda}}{a vector which stores the weightage of each transition matrices in P} \item{\code{byrow}}{if FALSE each column sum of transition matrix is 1 else row sum = 1} \item{\code{name}}{a name given to hommc} }} \examples{ statesName <- c("a", "b") P <- array(0, dim = c(2, 2, 4), dimnames = list(statesName, statesName)) P[,,1] <- matrix(c(0, 1, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,2] <- matrix(c(1/4, 3/4, 0, 1), byrow = FALSE, nrow = 2) P[,,3] <- matrix(c(1, 0, 1/3, 2/3), byrow = FALSE, nrow = 2) P[,,4] <- matrix(c(3/4, 1/4, 0, 1), byrow = FALSE, nrow = 2) Lambda <- c(0.8, 0.2, 0.3, 0.7) ob <- new("hommc", order = 1, states = statesName, P = P, Lambda = Lambda, byrow = FALSE, name = "FOMMC") } \author{ Giorgio Spedicato, Deepak Yadav } markovchain/man/is.irreducible.Rd0000644000176200001440000000177515137702633016542 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{is.irreducible} \alias{is.irreducible} \title{Function to check if a Markov chain is irreducible (i.e. 
ergodic)} \usage{ is.irreducible(object) } \arguments{ \item{object}{A \code{markovchain} object} } \value{ A boolean value. } \description{ This function verifies whether a \code{markovchain} object transition matrix is composed of only one communicating class. } \details{ It is based on \code{.communicatingClasses} internal function. } \examples{ statesNames <- c("a", "b") mcA <- new("markovchain", transitionMatrix = matrix(c(0.7,0.3,0.1,0.9), byrow = TRUE, nrow = 2, dimnames = list(statesNames, statesNames) )) is.irreducible(mcA) } \references{ Feres, Matlab listings for Markov Chains. } \seealso{ \code{\link{summary}} } \author{ Giorgio Spedicato } markovchain/man/committorAB.Rd0000644000176200001440000000220315137702633016042 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/probabilistic.R \name{committorAB} \alias{committorAB} \title{Calculates committor of a markovchain object with respect to set A, B} \usage{ committorAB(object,A,B,p) } \arguments{ \item{object}{a markovchain class object} \item{A}{a set of states} \item{B}{a set of states} \item{p}{initial state (default value : 1)} } \value{ Returns a vector of probabilities in case the initial state is not provided, else returns a number } \description{ Returns the probability of hitting states from set A before set B with different initial states } \details{ The function solves a system of linear equations to calculate the probability that the process hits a state from set A before any state from set B } \examples{ transMatr <- matrix(c(0,0,0,1,0.5, 0.5,0,0,0,0, 0.5,0,0,0,0, 0,0.2,0.4,0,0, 0,0.8,0.6,0,0.5), nrow = 5) object <- new("markovchain", states=c("a","b","c","d","e"),transitionMatrix=transMatr) committorAB(object,c(5),c(3)) } markovchain/DESCRIPTION0000644000176200001440000000565715140042215014271 0ustar liggesusersPackage: markovchain Type: Package Title: Easy Handling Discrete Time Markov Chains Version: 0.10.3 Authors@R: c( person("Giorgio Alfredo", 
"Spedicato", role = c("aut", "cre"), email = "spedicato_giorgio@yahoo.it",comment = c(ORCID = "0000-0002-0315-8888")), person("Tae", "Seung Kang", role = "aut"), person("Sai", "Bhargav Yalamanchi", role = "aut"), person("Mildenberger", "Thoralf", role = "ctb", comment = c(ORCID = "0000-0001-7242-1873")), person("Deepak", "Yadav", role = "aut"), person("Ignacio", "Cordón", role = "aut", comment = c(ORCID = "0000-0002-3152-0231")), person("Vandit", "Jain", role = "ctb"), person("Toni", "Giorgino", role="ctb", comment = c(ORCID = "0000-0001-6449-0596")), person("Richèl J.C.", "Bilderbeek", role = "ctb", comment = c(ORCID = "0000-0003-1107-7049")), person("Daniel", "Ebbert", email = "daniel@ebbert.nrw", role = "ctb", comment = c(ORCID = "0000-0003-3666-7205")), person("Shreyash", "Maheshwari", email = "coolshreysh1000@gmail.com", role = "ctb"), person("Reinhold", "Koch", role = "ctb") ) Maintainer: Giorgio Alfredo Spedicato Description: Functions and S4 methods to create and manage discrete time Markov chains more easily. In addition functions to perform statistical (fitting and drawing random variates) and probabilistic (analysis of their structural proprieties) analysis are provided. See Spedicato (2017) . Some functions for continuous times Markov chains depend on the suggested ctmcd package. 
License: MIT + file LICENSE Depends: R (>= 4.2.0), Matrix (>= 1.5-0), methods Imports: igraph, expm, stats4, parallel, Rcpp (>= 1.0.2), RcppParallel, utils, stats, grDevices Suggests: knitr, testthat, diagram, DiagrammeR, msm, Rsolnp, rmarkdown, ctmcd, bookdown, rticles, MCMCpack, microbenchmark Enhances: etm VignetteBuilder: rmarkdown, knitr, bookdown, rticles LinkingTo: Rcpp, RcppParallel, RcppArmadillo (>= 0.9.600.4.0) SystemRequirements: GNU make LazyLoad: yes ByteCompile: yes Encoding: UTF-8 BugReports: https://github.com/spedygiorgio/markovchain/issues URL: https://github.com/spedygiorgio/markovchain/ RoxygenNote: 7.3.3 NeedsCompilation: yes Packaged: 2026-02-01 17:45:30 UTC; Utente Author: Giorgio Alfredo Spedicato [aut, cre] (ORCID: ), Tae Seung Kang [aut], Sai Bhargav Yalamanchi [aut], Mildenberger Thoralf [ctb] (ORCID: ), Deepak Yadav [aut], Ignacio Cordón [aut] (ORCID: ), Vandit Jain [ctb], Toni Giorgino [ctb] (ORCID: ), Richèl J.C. Bilderbeek [ctb] (ORCID: ), Daniel Ebbert [ctb] (ORCID: ), Shreyash Maheshwari [ctb], Reinhold Koch [ctb] Repository: CRAN Date/Publication: 2026-02-02 06:30:37 UTC