actuar/0000755000176200001440000000000015151421061011525 5ustar liggesusersactuar/tests/0000755000176200001440000000000015151412457012700 5ustar liggesusersactuar/tests/rmixture-tests.R0000644000176200001440000000742515147745722016063 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Tests for the simulation of discrete mixtures with 'rmixture'. ### ### AUTHOR: Vincent Goulet ## Load the package library(actuar) ## Copy of tools::assertError. assertError <- tools::assertError ## Set common values for the tests n <- 20 bmodels <- expression(rexp(1/20), rlnorm(3.6, 0.6), rpareto(shape = 4, scale = 240)) ## Function to inject the number of variates in an expression and ## evaluate it. f <- function(n, expr) { expr$n <- n eval(expr) } ## Test a "normal" case (with data that is not reshuffled). set.seed(123) probs <- c(2, 3, 5)/10 nj <- rmultinom(1, n, prob = probs) x <- c(f(nj[1], bmodels[[1]]), f(nj[2], bmodels[[2]]), f(nj[3], bmodels[[3]])) set.seed(123) stopifnot(exprs = { identical(x, rmixture(n, probs, bmodels, shuffle = FALSE)) }) ## Test recycling of the probability vector. set.seed(123) probs <- 1 nj <- rmultinom(1, n, prob = rep_len(probs, 3)) x <- c(f(nj[1], bmodels[[1]]), f(nj[2], bmodels[[2]]), f(nj[3], bmodels[[3]])) set.seed(123) stopifnot(exprs = { identical(x, rmixture(n, probs, bmodels, shuffle = FALSE)) }) ## Test recycling of the models vector. set.seed(123) probs <- c(2, 3, 5) nj <- rmultinom(1, n, prob = probs) x <- f(n, bmodels[[1]]) set.seed(123) stopifnot(exprs = { identical(x, rmixture(n, probs, bmodels[1], shuffle = FALSE)) }) ## Test special cases. stopifnot(exprs = { identical(numeric(0), rmixture(0, probs, bmodels)) identical(2L, length(rmixture(c(n, n), probs, bmodels))) }) ## Test the calling environment, that is that arguments are correctly ## identified when 'rmixture' is called inside another function. 
set.seed(123) probs <- c(2, 3, 5)/10 x <- rmixture(n, probs, bmodels) f <- function(n, p, model) rmixture(n, p, model) g <- function(n, p, m, q) rmixture(n, p, expression(rexp(m[1]), rlnorm(m[2], q[2]), rpareto(m[3], q[3]))) h <- function(n, p, model) f(n, c(p[1], p[2], p[3]), c(model[1], model[2], model[3])) k <- function(n, p, m, q) { ## Pathological case where the models expression does not evaluate ## in the frame of 'rmixture' as 'm' and 'q' will not be bound. ## The fix is to substitute variables by their values. models <- substitute(expression(rexp(m[1]), rlnorm(m[2], q[2]), rpareto(m[3], q[3])), list(m = m, q = q)) f(n, p, eval(models)) } stopifnot(exprs = { identical(x, { set.seed(123) f(n, probs, bmodels) }) identical(x, { set.seed(123) f(n, c(probs[1], probs[2], probs[3]), c(bmodels[1], bmodels[2], bmodels[3])) }) identical(x, { set.seed(123) g(n, p = probs, m = c(eval(bmodels[[c(1, 2)]]), eval(bmodels[[c(2, 2)]]), eval(bmodels[[c(3, 2)]])), q = c(NA, eval(bmodels[[c(2, 3)]]), eval(bmodels[[c(3, 3)]]))) }) identical(x, { set.seed(123) h(n, probs, expression(rexp(eval(bmodels[[c(1, 2)]])), rlnorm(eval(bmodels[[c(2, 2)]]), eval(bmodels[[c(2, 3)]])), rpareto(shape = eval(bmodels[[c(3, 2)]]), scale = eval(bmodels[[c(3, 3)]])))) }) identical(x, { set.seed(123) k(n, p = probs, m = c(eval(bmodels[[c(1, 2)]]), eval(bmodels[[c(2, 2)]]), eval(bmodels[[c(3, 2)]])), q = c(NA, eval(bmodels[[c(2, 3)]]), eval(bmodels[[c(3, 3)]]))) }) }) ## Finally, test invalid arguments. 
assertError(rmixture(-1, probs, bmodels)) assertError(rmixture(c(3, -1), probs, bmodels)) assertError(rmixture(n, numeric(0), bmodels)) assertError(rmixture(n, 0, bmodels)) assertError(rmixture(n, c(0, 0), bmodels)) assertError(rmixture(n, probs, c(rexp(2), rexp(7)))) actuar/tests/betaint-tests.R0000644000176200001440000000346315147745722015630 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Tests for the "beta integral" ### ### B(a, b; x) = Gamma(a + b) int_0^x t^(a-1) (1 - t)^(b-1) dt ### ### Inspired by (and some parts taken from) `tests/d-p-q-r-tests.R` in ### R sources. ### ### AUTHOR: Vincent Goulet with ### indirect help from the R Core Team ## Load the package library(actuar) ## Define a "local" version of the otherwise non-exported function ## 'betaint'. betaint <- actuar:::betaint ## Special values and utilities. Taken from `tests/d-p-q-r-tests.R`. xMax <- 1 - .Machine$double.eps xMin <- .Machine$double.xmin All.eq <- function(x, y) { all.equal.numeric(x, y, tolerance = 64 * .Machine$double.eps, scale = max(0, mean(abs(x), na.rm = TRUE))) } if(!interactive()) set.seed(123) ## Limiting cases stopifnot(exprs = { !is.finite(betaint(0.3, Inf, 2)) !is.finite(betaint(0.3, Inf, -2.2)) is.nan (betaint(0.3, 0, 2)) !is.finite(betaint(0.3, 2, Inf)) is.nan (betaint(0.3, 2, -2.2)) # a <= 1 + floor(-b) is.nan (betaint(0.3, 2, 0)) }) ## Tests for cases with b > 0 x <- c(xMin, runif(10), xMax) b <- 2 for (a in rlnorm(5, 2)) stopifnot(exprs = { All.eq(betaint(x, a, b), gamma(a) * gamma(b) * pbeta(x, a, b)) }) ## Tests for cases with b < 0 b <- -2.2 r <- floor(-b) # r = 2 for (a in 1 + r + rlnorm(5, 2)) { s <- (x^(a-1) * (1-x)^b)/b + ((a-1) * x^(a-2) * (1-x)^(b+1))/(b * (b+1)) + ((a-1) * (a-2) * x^(a-3) * (1-x)^(b+2))/(b * (b+1) * (b+2)) stopifnot(exprs = { all.equal(betaint(x, a, b), -gamma(a+b) * s + (a-1)*(a-2)*(a-3) * gamma(a-r-1)/(b*(b+1)*(b+2)) * gamma(b+r+1)*pbeta(x, a-r-1, b+r+1)) }) } 
actuar/tests/rcompound-tests.R0000644000176200001440000001113415147745722016202 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Tests for the simulation of compound models with 'rcompound' and ### 'rcomppois'. ### ### AUTHOR: Vincent Goulet ## Load the package library(actuar) ## Copy of tools::assertError. assertError <- tools::assertError ### ### Tests for rcompound ### ## Test the function itself with various types of arguments. n <- 20 fmodel <- expression(rnbinom(2, 0.8)) smodel <- expression(rgamma(2, 1)) set.seed(123) x <- numeric(n) N <- rnbinom(n, 2, 0.8) y <- rgamma(sum(N), 2, 1) x[which(N != 0)] <- tapply(y, rep(seq_len(n), N), sum) stopifnot(exprs = { identical(x, { set.seed(123) rcompound(n, rnbinom(2, 0.8), rgamma(2, 1)) }) identical(x, { set.seed(123) rcompound(n, rnbinom(2, 0.8), expression(rgamma(2, 1))) }) identical(x, { set.seed(123) rcompound(n, expression(rnbinom(2, 0.8)), rgamma(2, 1)) }) identical(x, { set.seed(123) rcompound(n, fmodel, smodel) }) }) ## Test the calling environment, that is that arguments are correctly ## identified when 'rcompound' is called inside another function. 
n <- 20 lambda <- 2 smodel <- expression(rgamma(2, 1)) set.seed(123) x <- rcompound(n, rpois(2), rgamma(2, 1)) f <- function(n, p, model.sev) { ## safe way to pass down the arguments model.freq <- substitute(rpois(p), list(p = p)) model.sev <- substitute(model.sev) if (is.name(model.sev)) model.sev <- eval.parent(model.sev) rcompound(n, model.freq, model.sev) } g1 <- function(n, p, s, r) rcompound(n, rpois(p), rgamma(s, r)) g2 <- function(n, p, s, r) rcompound(n, expression(rpois(p)), expression(rgamma(s, r))) h <- function(n, p, model.sev) { ## safe way to pass down the arguments model.sev <- substitute(model.sev) if (is.name(model.sev)) model.sev <- eval.parent(model.sev) f(n, p, model.sev) } stopifnot(exprs = { identical(x, { set.seed(123) f(n, 2, rgamma(2, 1)) }) identical(x, { set.seed(123) f(n, lambda, expression(rgamma(2, 1))) }) identical(x, { set.seed(123) f(n, lambda, smodel) }) identical(x, { set.seed(123) g1(n, lambda, 2, 1) }) identical(x, { set.seed(123) g2(n, lambda, 2, 1) }) identical(x, { set.seed(123) h(n, 2, rgamma(2, 1)) }) identical(x, { set.seed(123) h(n, lambda, smodel) }) }) ## Test invalid arguments. assertError(rcompound(-1, rpois(2), rgamma(2, 1))) ### ### Tests for rcomppois ### ## Test the function itself with various types of arguments. n <- 20 lambda <- 2 smodel <- expression(rgamma(2, 1)) set.seed(123) x <- numeric(n) N <- rpois(n, 2) y <- rgamma(sum(N), 2, 1) x[which(N != 0)] <- tapply(y, rep(seq_len(n), N), sum) stopifnot(exprs = { identical(x, { set.seed(123) rcomppois(n, 2, rgamma(2, 1)) }) identical(x, { set.seed(123) rcomppois(n, lambda, expression(rgamma(2, 1))) }) identical(x, { set.seed(123) rcomppois(n, lambda, smodel) }) }) ## Test the calling environment, that is that arguments are correctly ## identified when 'rcomppois' is called inside another function. 
n <- 20 lambda <- 2 smodel <- expression(rgamma(2, 1)) set.seed(123) x <- rcomppois(n, lambda, smodel) f <- function(n, p, model) { ## safe way to pass down all sorts of 'model' objects model <- substitute(model) if (is.name(model)) model <- eval.parent(model) rcomppois(n, p, model) } g1 <- function(n, p, s, r) rcomppois(n, p, rgamma(s, r)) g2 <- function(n, p, s, r) rcomppois(n, p, expression(rgamma(s, r))) h <- function(n, p, model) { ## safe way to pass down all sorts of 'model' objects model <- substitute(model) if (is.name(model)) model <- eval.parent(model) f(n, p, model) } stopifnot(exprs = { identical(x, { set.seed(123) f(n, 2, rgamma(2, 1)) }) identical(x, { set.seed(123) f(n, lambda, expression(rgamma(2, 1))) }) identical(x, { set.seed(123) f(n, lambda, smodel) }) identical(x, { set.seed(123) g1(n, 2, 2, 1) }) identical(x, { set.seed(123) g2(n, 2, 2, 1) }) identical(x, { set.seed(123) h(n, 2, rgamma(2, 1)) }) identical(x, { set.seed(123) h(n, lambda, smodel) }) }) ## Test invalid arguments. assertError(rcomppois(-1, lambda, smodel)) assertError(rcomppois(n, -1, smodel)) assertError(rcomppois(n, c(3, -1), smodel)) actuar/tests/dpqr-tests.R0000644000176200001440000032112615151206331015126 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Tests of functions for continuous and discrete probability ### distributions. ### ### Despite the name of the file, the tests are for [dpqrm,lev] ### functions (for continuous distributions): ### ### d: density or probability mass ### p: cumulative distribution ### q: quantile ### r: random number generation ### m: moment ### lev: limited moment ### ### Distributions are classified and sorted as in appendix A and ### appendix B of the 'distributions' package vignette. ### ### Inspired by (and some parts taken from) `tests/d-p-q-r-tests.R` in ### R sources. 
### ### AUTHOR: Vincent Goulet with ### indirect help from the R Core Team ## Load the package library(actuar) library(expint) # for gammainc ## Define a "local" version of the otherwise non-exported function ## 'betaint'. betaint <- actuar:::betaint ## No warnings, unless explicitly asserted via tools::assertWarning. options(warn = 2) assertWarning <- tools::assertWarning ## Special values and utilities. Taken from `tests/d-p-q-r-tests.R`. Meps <- .Machine$double.eps xMax <- .Machine$double.xmax xMin <- .Machine$double.xmin All.eq <- function(x, y) { all.equal.numeric(x, y, tolerance = 64 * .Machine$double.eps, scale = max(0, mean(abs(x), na.rm = TRUE))) } if(!interactive()) set.seed(123) ### ### CONTINUOUS DISTRIBUTIONS ### ## ## FELLER-PARETO AND PARETO II, III, IV DISTRIBUTIONS ## ## When reasonable, we also test consistency with the special cases ## min = 0: ## ## Feller-Pareto -> Transformated beta ## Pareto IV -> Burr ## Pareto III -> Loglogistic ## Pareto II -> Pareto ## Density: first check that functions return 0 when scale = Inf, and ## when x = scale = Inf. stopifnot(exprs = { dfpareto(c(42, Inf), min = 1, shape1 = 2, shape2 = 3, shape3 = 4, scale = Inf) == c(0, 0) dpareto4(c(42, Inf), min = 1, shape1 = 2, shape2 = 3, scale = Inf) == c(0, 0) dpareto3(c(42, Inf), min = 1, shape = 3, scale = Inf) == c(0, 0) dpareto2(c(42, Inf), min = 1, shape = 2, scale = Inf) == c(0, 0) }) ## Next test density functions for an array of standard values. 
nshpar <- 3 # (maximum) number of shape parameters min <- round(rnorm(30, 2), 2) shpar <- replicate(30, rlnorm(nshpar, 2), simplify = FALSE) scpar <- rlnorm(30, 2) # scale parameters for (i in seq_along(min)) { m <- min[i] a <- shpar[[c(i, 1)]]; g <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]] Be <- beta(a, t) for (s in scpar) { x <- rfpareto(100, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s) y <- (x - m)/s u <- 1/(1 + y^(-g)) stopifnot(exprs = { all.equal(d1 <- dfpareto(x, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), d2 <- dfpareto(y, min = 0, shape1 = a, shape2 = g, shape3 = t, scale = 1)/s, tolerance = 1e-10) all.equal(d2, dtrbeta(y, shape1 = a, shape2 = g, shape3 = t, scale = 1)/s, tolerance = 1e-10) all.equal(d1, g * y^(g*t - 1)/(s * Be * (1 + y^g)^(a + t)), tolerance = 1e-10) all.equal(d1, g * u^t * (1 - u)^a/((x - m) * Be), tolerance = 1e-10) }) x <- rpareto4(100, min = m, shape1 = a, shape2 = g, scale = s) y <- (x - m)/s u <- 1/(1 + y^g) stopifnot(exprs = { all.equal(d1 <- dpareto4(x, min = m, shape1 = a, shape2 = g, scale = s), d2 <- dpareto4(y, min = 0, shape1 = a, shape2 = g, scale = 1)/s, tolerance = 1e-10) all.equal(d2, dburr(y, shape1 = a, shape2 = g, scale = 1)/s, tolerance = 1e-10) all.equal(d1, a * g * y^(g - 1)/(s * (1 + y^g)^(a + 1)), tolerance = 1e-10) all.equal(d1, a * g * u^a * (1 - u)/(x - m), tolerance = 1e-10) }) x <- rpareto3(100, min = m, shape = g, scale = s) y <- (x - m)/s u <- 1/(1 + y^(-g)) stopifnot(exprs = { all.equal(d1 <- dpareto3(x, min = m, shape = g, scale = s), d2 <- dpareto3(y, min = 0, shape = g, scale = 1)/s, tolerance = 1e-10) all.equal(d2, dllogis(y, shape = g, scale = 1)/s, tolerance = 1e-10) all.equal(d1, g * y^(g - 1)/(s * (1 + y^g)^2), tolerance = 1e-10) all.equal(d1, g * u * (1 - u)/(x - m), tolerance = 1e-10) }) x <- rpareto2(100, min = m, shape = a, scale = s) y <- (x - m)/s u <- 1/(1 + y) stopifnot(exprs = { all.equal(d1 <- dpareto2(x, min = m, shape = a, scale = s), d2 <- dpareto2(y, min = 0, 
shape = a, scale = 1)/s, tolerance = 1e-10) all.equal(d2, dpareto(y, shape = a, scale = 1)/s, tolerance = 1e-10) all.equal(d1, a/(s * (1 + y)^(a + 1)), tolerance = 1e-10) all.equal(d1, a * u^a * (1 - u)/(x - m), tolerance = 1e-10) }) } } ## Tests on the cumulative distribution function. ## ## Note: when shape1 = shape3 = 1, the underlying beta distribution is ## a uniform. Therefore, pfpareto(x, min, 1, shape2, 1, scale) should ## return the value of u = v/(1 + v), v = ((x - min)/scale)^shape2. ## ## x = 2/Meps = 2^53 (with min = 0, shape2 = scale = 1) is the value ## where the cdf would jump to 1 if we weren't using the trick to ## compute the cdf with pbeta(1 - u, ..., lower = FALSE). scLrg <- 1e300 * c(0.5, 1, 2) m <- rnorm(1) stopifnot(exprs = { pfpareto(Inf, min = 10, 1, 2, 3, scale = xMax) == 1 pfpareto(2^53, min = 0, 1, 1, 1, scale = 1) != 1 pfpareto(2^53 + xMax, min = xMax, 1, 1, 1, scale = 1) != 1 all.equal(pfpareto(xMin + m, min = m, 1, 1, 1, scale = 1), xMin) all.equal(y <- pfpareto(1e300 + m, min = m, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), shape3 = 1, scale = scLrg, log = TRUE), ptrbeta(1e300, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), shape3 = 1, scale = scLrg, log = TRUE)) all.equal(y, c(pbeta(c(2/3, 1/2), 1, 3, lower.tail = TRUE, log = TRUE), pbeta(2/3, 3, 1, lower.tail = FALSE, log = TRUE), pbeta(c(4/5, 1/2), 1, 3, lower.tail = TRUE, log = TRUE), pbeta(4/5, 3, 1, lower.tail = FALSE, log = TRUE))) }) stopifnot(exprs = { ppareto4(Inf, min = 10, 1, 3, scale = xMax) == 1 ppareto4(2^53, min = 0, 1, 1, scale = 1) != 1 ppareto4(2^53 + xMax, min = xMax, 1, 1, scale = 1) != 1 all.equal(ppareto4(xMin + m, min = m, 1, 1, scale = 1), xMin) all.equal(y <- ppareto4(1e300 + m, min = m, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE), pburr(1e300, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE)) all.equal(y, c(log1p(-c(1/3, 1/2, 2/3)^3), log1p(-c(1/5, 1/2, 
4/5)^3))) }) stopifnot(exprs = { ppareto3(Inf, min = 10, 3, scale = xMax) == 1 ppareto3(2^53, min = 0, 1, scale = 1) != 1 ppareto3(2^53 + xMax, min = xMax, 1, scale = 1) != 1 all.equal(ppareto3(xMin + m, min = m, 1, scale = 1), xMin) all.equal(y <- ppareto3(1e300 + m, min = m, shape = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE), pllogis (1e300, shape = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE)) all.equal(y, c(log(c(2/3, 1/2, 1/3)), log(c(4/5, 1/2, 1/5)))) }) stopifnot(exprs = { ppareto2(Inf, min = 10, 3, scale = xMax) == 1 ppareto2(2^53, min = 0, 1, scale = 1) != 1 ppareto2(2^53 + xMax, min = xMax, 1, scale = 1) != 1 all.equal(ppareto2(xMin + m, min = m, 1, scale = 1), xMin) all.equal(y <- ppareto2(1e300 + m, min = m, shape = 3, scale = scLrg, log = TRUE), ppareto (1e300, shape = 3, scale = scLrg, log = TRUE)) all.equal(y, c(log1p(-c(1/3, 1/2, 2/3)^3))) }) ## Also check that distribution functions return 0 when scale = Inf. stopifnot(exprs = { pfpareto(x, min = m, shape1 = a, shape2 = g, shape3 = t, scale = Inf) == 0 ppareto4(x, min = m, shape1 = a, shape2 = g, scale = Inf) == 0 ppareto3(x, min = m, shape = g, scale = Inf) == 0 ppareto2(x, min = m, shape = a, scale = Inf) == 0 }) ## Tests for first three (positive) moments ## ## Simulation of new parameters ensuring that the first three moments ## exist. 
set.seed(123) # reset the seed nshpar <- 3 # (maximum) number of shape parameters min <- round(rnorm(30, 2), 2) shpar <- replicate(30, c(3, 3, 0) + rlnorm(nshpar, 2), simplify = FALSE) scpar <- rlnorm(30, 2) # scale parameters for (i in seq_along(min)) { m <- min[i] a <- shpar[[c(i, 1)]]; g <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]] Be <- beta(a, t) Ga <- gamma(a) for (s in scpar) { stopifnot(exprs = { All.eq(mfpareto(1, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), m * (Be + (s/m) * beta(t + 1/g, a - 1/g))/Be) All.eq(mfpareto(2, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), m^2 * (Be + 2 * (s/m) * beta(t + 1/g, a - 1/g) + (s/m)^2 * beta(t + 2/g, a - 2/g))/Be) All.eq(mfpareto(3, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), m^3 * (Be + 3 * (s/m) * beta(t + 1/g, a - 1/g) + 3 * (s/m)^2 * beta(t + 2/g, a - 2/g) + (s/m)^3 * beta(t + 3/g, a - 3/g))/Be) }) stopifnot(exprs = { All.eq(mpareto4(1, min = m, shape1 = a, shape2 = g, scale = s), m * (Ga + (s/m) * gamma(1 + 1/g) * gamma(a - 1/g))/Ga) All.eq(mpareto4(2, min = m, shape1 = a, shape2 = g, scale = s), m^2 * (Ga + 2 * (s/m) * gamma(1 + 1/g) * gamma(a - 1/g) + (s/m)^2 * gamma(1 + 2/g) * gamma(a - 2/g))/Ga) All.eq(mpareto4(3, min = m, shape1 = a, shape2 = g, scale = s), m^3 * (Ga + 3 * (s/m) * gamma(1 + 1/g) * gamma(a - 1/g) + 3 * (s/m)^2 * gamma(1 + 2/g) * gamma(a - 2/g) + (s/m)^3 * gamma(1 + 3/g) * gamma(a - 3/g))/Ga) }) stopifnot(exprs = { All.eq(mpareto3(1, min = m, shape = g, scale = s), m * (1 + (s/m) * gamma(1 + 1/g) * gamma(1 - 1/g))) All.eq(mpareto3(2, min = m, shape = g, scale = s), m^2 * (1 + 2 * (s/m) * gamma(1 + 1/g) * gamma(1 - 1/g) + (s/m)^2 * gamma(1 + 2/g) * gamma(1 - 2/g))) All.eq(mpareto3(3, min = m, shape = g, scale = s), m^3 * (1 + 3 * (s/m) * gamma(1 + 1/g) * gamma(1 - 1/g) + 3 * (s/m)^2 * gamma(1 + 2/g) * gamma(1 - 2/g) + (s/m)^3 * gamma(1 + 3/g) * gamma(1 - 3/g))) }) stopifnot(exprs = { All.eq(mpareto2(1, min = m, shape = a, scale = s), m * (Ga + (s/m) * gamma(1 + 
1) * gamma(a - 1))/Ga) All.eq(mpareto2(2, min = m, shape = a, scale = s), m^2 * (Ga + 2 * (s/m) * gamma(1 + 1) * gamma(a - 1) + (s/m)^2 * gamma(1 + 2) * gamma(a - 2))/Ga) All.eq(mpareto2(3, min = m, shape = a, scale = s), m^3 * (Ga + 3 * (s/m) * gamma(1 + 1) * gamma(a - 1) + 3 * (s/m)^2 * gamma(1 + 2) * gamma(a - 2) + (s/m)^3 * gamma(1 + 3) * gamma(a - 3))/Ga) }) } } ## Tests for first three limited moments ## ## Limits are taken from quantiles of each distribution. q <- c(0.25, 0.50, 0.75, 0.9, 0.95) for (i in seq_along(min)) { m <- min[i] a <- shpar[[c(i, 1)]]; g <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]] Ga <- gamma(a) Gt <- gamma(t) for (s in scpar) { limit <- qfpareto(q, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s) y <- (limit - m)/s u <- 1/(1 + y^(-g)) stopifnot(exprs = { All.eq(levfpareto(limit, order = 1, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), m * (betaint(u, t, a) + (s/m) * betaint(u, t + 1/g, a - 1/g))/(Ga * Gt) + limit * pbeta(u, t, a, lower = FALSE)) All.eq(levfpareto(limit, order = 2, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), m^2 * (betaint(u, t, a) + 2 * (s/m) * betaint(u, t + 1/g, a - 1/g) + (s/m)^2 * betaint(u, t + 2/g, a - 2/g))/(Ga * Gt) + limit^2 * pbeta(u, t, a, lower = FALSE)) All.eq(levfpareto(limit, order = 3, min = m, shape1 = a, shape2 = g, shape3 = t, scale = s), m^3 * (betaint(u, t, a) + 3 * (s/m) * betaint(u, t + 1/g, a - 1/g) + 3 * (s/m)^2 * betaint(u, t + 2/g, a - 2/g) + (s/m)^3 * betaint(u, t + 3/g, a - 3/g))/(Ga * Gt) + limit^3 * pbeta(u, t, a, lower = FALSE)) }) limit <- qpareto4(q, min = m, shape1 = a, shape2 = g, scale = s) y <- (limit - m)/s u <- 1/(1 + y^g) u1m <- 1/(1 + y^(-g)) stopifnot(exprs = { All.eq(levpareto4(limit, order = 1, min = m, shape1 = a, shape2 = g, scale = s), m * (betaint(u1m, 1, a) + (s/m) * betaint(u1m, 1 + 1/g, a - 1/g))/Ga + limit * u^a) All.eq(levpareto4(limit, order = 2, min = m, shape1 = a, shape2 = g, scale = s), m^2 * (betaint(u1m, 1, a) + 2 * (s/m) * 
betaint(u1m, 1 + 1/g, a - 1/g) + (s/m)^2 * betaint(u1m, 1 + 2/g, a - 2/g))/Ga + limit^2 * u^a) All.eq(levpareto4(limit, order = 3, min = m, shape1 = a, shape2 = g, scale = s), m^3 * (betaint(u1m, 1, a) + 3 * (s/m) * betaint(u1m, 1 + 1/g, a - 1/g) + 3 * (s/m)^2 * betaint(u1m, 1 + 2/g, a - 2/g) + (s/m)^3 * betaint(u1m, 1 + 3/g, a - 3/g))/Ga + limit^3 * u^a) }) limit <- qpareto3(q, min = m, shape = g, scale = s) y <- (limit - m)/s u <- 1/(1 + y^(-g)) u1m <- 1/(1 + y^g) stopifnot(exprs = { All.eq(levpareto3(limit, order = 1, min = m, shape = g, scale = s), m * (u + (s/m) * betaint(u, 1 + 1/g, 1 - 1/g)) + limit * u1m) All.eq(levpareto3(limit, order = 2, min = m, shape = g, scale = s), m^2 * (u + 2 * (s/m) * betaint(u, 1 + 1/g, 1 - 1/g) + (s/m)^2 * betaint(u, 1 + 2/g, 1 - 2/g)) + limit^2 * u1m) All.eq(levpareto3(limit, order = 3, min = m, shape = g, scale = s), m^3 * (u + 3 * (s/m) * betaint(u, 1 + 1/g, 1 - 1/g) + 3 * (s/m)^2 * betaint(u, 1 + 2/g, 1 - 2/g) + (s/m)^3 * betaint(u, 1 + 3/g, 1 - 3/g)) + limit^3 * u1m) }) limit <- qpareto2(q, min = m, shape = a, scale = s) y <- (limit - m)/s u <- 1/(1 + y) u1m <- 1/(1 + y^(-1)) stopifnot(exprs = { All.eq(levpareto2(limit, order = 1, min = m, shape = a, scale = s), m * (betaint(u1m, 1, a) + (s/m) * betaint(u1m, 1 + 1, a - 1))/Ga + limit * u^a) All.eq(levpareto2(limit, order = 2, min = m, shape = a, scale = s), m^2 * (betaint(u1m, 1, a) + 2 * (s/m) * betaint(u1m, 1 + 1, a - 1) + (s/m)^2 * betaint(u1m, 1 + 2, a - 2))/Ga + limit^2 * u^a) All.eq(levpareto2(limit, order = 3, min = m, shape = a, scale = s), m^3 * (betaint(u1m, 1, a) + 3 * (s/m) * betaint(u1m, 1 + 1, a - 1) + 3 * (s/m)^2 * betaint(u1m, 1 + 2, a - 2) + (s/m)^3 * betaint(u1m, 1 + 3, a - 3))/Ga + limit^3 * u^a) }) } } ## ## TRANSFORMED BETA FAMILY ## ## Density: first check that functions return 0 when scale = Inf, and ## when x = scale = Inf. 
stopifnot(exprs = { dtrbeta (c(42, Inf), shape1 = 2, shape2 = 3, shape3 = 4, scale = Inf) == c(0, 0) dburr (c(42, Inf), shape1 = 2, shape2 = 3, scale = Inf) == c(0, 0) dllogis (c(42, Inf), shape = 3, scale = Inf) == c(0, 0) dparalogis (c(42, Inf), shape = 2, scale = Inf) == c(0, 0) dgenpareto (c(42, Inf), shape1 = 2, shape2 = 4, scale = Inf) == c(0, 0) dpareto (c(42, Inf), shape = 2, scale = Inf) == c(0, 0) dinvburr (c(42, Inf), shape1 = 4, shape2 = 3, scale = Inf) == c(0, 0) dinvpareto (c(42, Inf), shape = 4, scale = Inf) == c(0, 0) dinvparalogis(c(42, Inf), shape = 4, scale = Inf) == c(0, 0) }) ## Next test density functions for an array of standard values. set.seed(123) # reset the seed nshpar <- 3 # (maximum) number of shape parameters shpar <- replicate(30, rlnorm(nshpar, 2), simplify = FALSE) scpar <- rlnorm(30, 2) # scale parameters for (i in seq_along(shpar)) { a <- shpar[[c(i, 1)]]; g <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]] Be <- beta(a, t) for (s in scpar) { x <- rtrbeta(100, shape1 = a, shape2 = g, shape3 = t, scale = s) y <- x/s u <- 1/(1 + y^(-g)) stopifnot(exprs = { all.equal(d1 <- dtrbeta(x, shape1 = a, shape2 = g, shape3 = t, scale = s), d2 <- dtrbeta(y, shape1 = a, shape2 = g, shape3 = t, scale = 1)/s, tolerance = 1e-10) all.equal(d1, g * y^(g*t - 1)/(s * Be * (1 + y^g)^(a + t)), tolerance = 1e-10) all.equal(d1, g * u^t * (1 - u)^a/(x * Be), tolerance = 1e-10) }) x <- rburr(100, shape1 = a, shape2 = g, scale = s) y <- x/s u <- 1/(1 + y^g) stopifnot(exprs = { all.equal(d1 <- dburr(x, shape1 = a, shape2 = g, scale = s), d2 <- dburr(y, shape1 = a, shape2 = g, scale = 1)/s, tolerance = 1e-10) all.equal(d1, a * g * y^(g - 1)/(s * (1 + y^g)^(a + 1)), tolerance = 1e-10) all.equal(d1, a * g * u^a * (1 - u)/x, tolerance = 1e-10) }) x <- rllogis(100, shape = g, scale = s) y <- x/s u <- 1/(1 + y^(-g)) stopifnot(exprs = { all.equal(d1 <- dllogis(x, shape = g, scale = s), d2 <- dllogis(y, shape = g, scale = 1)/s, tolerance = 1e-10) all.equal(d1, g * y^(g - 
1)/(s * (1 + y^g)^2), tolerance = 1e-10) all.equal(d1, g * u * (1 - u)/x, tolerance = 1e-10) }) x <- rparalogis(100, shape = a, scale = s) y <- x/s u <- 1/(1 + y^a) stopifnot(exprs = { all.equal(d1 <- dparalogis(x, shape = a, scale = s), d2 <- dparalogis(y, shape = a, scale = 1)/s, tolerance = 1e-10) all.equal(d1, a^2 * y^(a - 1)/(s * (1 + y^a)^(a + 1)), tolerance = 1e-10) all.equal(d1, a^2 * u^a * (1 - u)/x, tolerance = 1e-10) }) x <- rgenpareto(100, shape1 = a, shape2 = t, scale = s) y <- x/s u <- 1/(1 + y^(-1)) stopifnot(exprs = { all.equal(d1 <- dgenpareto(x, shape1 = a, shape2 = t, scale = s), d2 <- dgenpareto(y, shape1 = a, shape2 = t, scale = 1)/s, tolerance = 1e-10) all.equal(d1, y^(t - 1)/(s * Be * (1 + y)^(a + t)), tolerance = 1e-10) all.equal(d1, u^t * (1 - u)^a/(x * Be), tolerance = 1e-10) }) x <- rpareto(100, shape = a, scale = s) y <- x/s u <- 1/(1 + y) stopifnot(exprs = { all.equal(d1 <- dpareto(x, shape = a, scale = s), d2 <- dpareto(y, shape = a, scale = 1)/s, tolerance = 1e-10) all.equal(d1, a/(s * (1 + y)^(a + 1)), tolerance = 1e-10) all.equal(d1, a * u^a * (1 - u)/x, tolerance = 1e-10) }) x <- rpareto1(100, min = s, shape = a) stopifnot(exprs = { all.equal(d1 <- dpareto1(x, min = s, shape = a), a * s^a/(x^(a + 1)), tolerance = 1e-10) }) x <- rinvburr(100, shape1 = t, shape2 = g, scale = s) y <- x/s u <- 1/(1 + y^(-g)) stopifnot(exprs = { all.equal(d1 <- dinvburr(x, shape1 = t, shape2 = g, scale = s), d2 <- dinvburr(y, shape1 = t, shape2 = g, scale = 1)/s, tolerance = 1e-10) all.equal(d1, t * g * y^(g*t - 1)/(s * (1 + y^g)^(t + 1)), tolerance = 1e-10) all.equal(d1, t * g * u^t * (1 - u)/x, tolerance = 1e-10) }) x <- rinvpareto(100, shape = t, scale = s) y <- x/s u <- 1/(1 + y^(-1)) stopifnot(exprs = { all.equal(d1 <- dinvpareto(x, shape = t, scale = s), d2 <- dinvpareto(y, shape = t, scale = 1)/s, tolerance = 1e-10) all.equal(d1, t * y^(t - 1)/(s * (1 + y)^(t + 1)), tolerance = 1e-10) all.equal(d1, t * u^t * (1 - u)/x, tolerance = 1e-10) }) x <- 
rinvparalogis(100, shape = t, scale = s) y <- x/s u <- 1/(1 + y^(-t)) stopifnot(exprs = { all.equal(d1 <- dinvparalogis(x, shape = t, scale = s), d2 <- dinvparalogis(y, shape = t, scale = 1)/s, tolerance = 1e-10) all.equal(d1, t^2 * y^(t^2 - 1)/(s * (1 + y^t)^(t + 1)), tolerance = 1e-10) all.equal(d1, t^2 * u^t * (1 - u)/x, tolerance = 1e-10) }) } } ## Tests on the cumulative distribution function. ## ## Note: when shape1 = shape3 = 1, the underlying beta distribution is ## a uniform. Therefore, ptrbeta(x, 1, shape2, 1, scale) should return ## the value of u = v/(1 + v), v = (x/scale)^shape2. ## ## x = 2/Meps = 2^53 (with, shape2 = scale = 1) is the value where the ## cdf would jump to 1 if we weren't using the trick to compute the ## cdf with pbeta(1 - u, ..., lower = FALSE). scLrg <- 1e300 * c(0.5, 1, 2) stopifnot(exprs = { ptrbeta(Inf, 1, 2, 3, scale = xMax) == 1 ptrbeta(2^53, 1, 1, 1, scale = 1) != 1 all.equal(ptrbeta(xMin, 1, 1, 1, scale = 1), xMin) all.equal(ptrbeta(1e300, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), shape3 = 1, scale = scLrg, log = TRUE), c(pbeta(c(2/3, 1/2), 1, 3, lower.tail = TRUE, log = TRUE), pbeta(2/3, 3, 1, lower.tail = FALSE, log = TRUE), pbeta(c(4/5, 1/2), 1, 3, lower.tail = TRUE, log = TRUE), pbeta(4/5, 3, 1, lower.tail = FALSE, log = TRUE))) }) stopifnot(exprs = { pburr(Inf, 1, 3, scale = xMax) == 1 pburr(2^53, 1, 1, scale = 1) != 1 all.equal(pburr(xMin, 1, 1, scale = 1), xMin) all.equal(pburr(1e300, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE), c(log1p(-c(1/3, 1/2, 2/3)^3), log1p(-c(1/5, 1/2, 4/5)^3))) }) stopifnot(exprs = { pllogis(Inf, 3, scale = xMax) == 1 pllogis(2^53, 1, scale = 1) != 1 all.equal(pllogis(xMin, 1, scale = 1), xMin) all.equal(pllogis(1e300, shape = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE), c(log(c(2/3, 1/2, 1/3)), log(c(4/5, 1/2, 1/5)))) }) stopifnot(exprs = { pparalogis(Inf, 3, scale = xMax) == 1 pparalogis(2^53, 1, scale = 1) != 1 
all.equal(pparalogis(xMin, 1, scale = 1), xMin) all.equal(pparalogis(1e300, shape = rep(c(2, 3), each = length(scLrg)), scale = scLrg, log = TRUE), c(log1p(-c(1/5, 1/2, 4/5)^2), log1p(-c(1/9, 1/2, 8/9)^3))) }) stopifnot(exprs = { pgenpareto(Inf, 1, 3, scale = xMax) == 1 pgenpareto(2^53, 1, 1, scale = 1) != 1 all.equal(pgenpareto(xMin, 1, 1, scale = 1), xMin) all.equal(pgenpareto(1e300, shape1 = 3, shape2 = 1, scale = scLrg, log = TRUE), c(pbeta(c(2/3, 1/2), 1, 3, lower.tail = TRUE, log = TRUE), pbeta(2/3, 3, 1, lower.tail = FALSE, log = TRUE))) }) stopifnot(exprs = { ppareto(Inf, 3, scale = xMax) == 1 ppareto(2^53, 1, scale = 1) != 1 all.equal(ppareto(xMin, 1, scale = 1), xMin) all.equal(ppareto(1e300, shape = 3, scale = scLrg, log = TRUE), c(log1p(-c(1/3, 1/2, 2/3)^3))) }) stopifnot(exprs = { ppareto1(Inf, 3, min = xMax) == 1 ppareto1(2^53, 1, min = 1) != 1 all.equal(ppareto1(xMin, 1, min = 1), xMin) all.equal(ppareto1(1e300, shape = 3, min = 1e300 * c(0.001, 0.1, 0.5), log = TRUE), c(log1p(-c(0.001, 0.1, 0.5)^3))) }) stopifnot(exprs = { pinvburr(Inf, 1, 3, scale = xMax) == 1 pinvburr(2^53, 1, 1, scale = 1) != 1 all.equal(pinvburr(xMin, 1, 1, scale = 1), xMin) all.equal(pinvburr(1e300, shape1 = 3, shape2 = rep(c(1, 2), each = length(scLrg)), scale = scLrg, log = TRUE), c(log(c(2/3, 1/2, 1/3)^3), log(c(4/5, 1/2, 1/5)^3))) }) stopifnot(exprs = { pinvpareto(Inf, 3, scale = xMax) == 1 pinvpareto(2^53, 1, scale = 1) != 1 all.equal(pinvpareto(xMin, 1, scale = 1), xMin) all.equal(pinvpareto(1e300, shape = 3, scale = scLrg, log = TRUE), c(log(c(2/3, 1/2, 1/3)^3))) }) stopifnot(exprs = { pinvparalogis(Inf, 3, scale = xMax) == 1 pinvparalogis(2^53, 1, scale = 1) != 1 all.equal(pinvparalogis(xMin, 1, scale = 1), xMin) all.equal(pinvparalogis(1e300, shape = rep(c(2, 3), each = length(scLrg)), scale = scLrg, log = TRUE), c(log(c(4/5, 1/2, 1/5)^2), log(c(8/9, 1/2, 1/9)^3))) }) ## Also check that distribution functions return 0 when scale = Inf. 
## All cdfs of the transformed beta family must be 0 for any finite x
## when the scale is infinite.
stopifnot(exprs = {
    ptrbeta     (x, shape1 = a, shape2 = g, shape3 = t, scale = Inf) == 0
    pburr       (x, shape1 = a, shape2 = g, scale = Inf) == 0
    pllogis     (x, shape = g, scale = Inf) == 0
    pparalogis  (x, shape = a, scale = Inf) == 0
    pgenpareto  (x, shape1 = a, shape2 = t, scale = Inf) == 0
    ppareto     (x, shape = a, scale = Inf) == 0
    pinvburr    (x, shape1 = t, shape2 = g, scale = Inf) == 0
    pinvpareto  (x, shape = t, scale = Inf) == 0
    pinvparalogis(x, shape = t, scale = Inf) == 0
})

## Tests for first three positive moments and first two negative
## moments.
##
## Simulation of new parameters ensuring that said moments exist.
## (Shifting the lognormal draws by 3 keeps every shape parameter
## large enough for orders -2 through 3.)
set.seed(123)                    # reset the seed
nshpar <- 3                      # (maximum) number of shape parameters
shpar <- replicate(30, c(3, 3, 3) + rlnorm(nshpar, 2), simplify = FALSE)
scpar <- rlnorm(30, 2)           # scale parameters
k <- c(-2, -1, 1, 2, 3)          # orders
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; g <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]]
    Be <- beta(a, t)
    Ga <- gamma(a)
    for (s in scpar)
    {
        ## Each moment is compared to its closed form in terms of
        ## gamma/beta functions.
        stopifnot(exprs = {
            All.eq(mtrbeta(k, shape1 = a, shape2 = g, shape3 = t, scale = s),
                   s^k * beta(t + k/g, a - k/g)/Be)
            All.eq(mburr(k, shape1 = a, shape2 = g, scale = s),
                   s^k * gamma(1 + k/g) * gamma(a - k/g)/Ga)
            All.eq(mllogis(k, shape = g, scale = s),
                   s^k * gamma(1 + k/g) * gamma(1 - k/g))
            All.eq(mparalogis(k, shape = a, scale = s),
                   s^k * gamma(1 + k/a) * gamma(a - k/a)/Ga)
            All.eq(mgenpareto(k, shape1 = a, shape2 = t, scale = s),
                   s^k * beta(t + k, a - k)/Be)
            All.eq(mpareto(k[k > -1], shape = a, scale = s),
                   s^k[k > -1] * gamma(1 + k[k > -1]) * gamma(a - k[k > -1])/Ga)
            All.eq(mpareto1(k, shape = a, min = s),
                   s^k * a/(a - k))
            All.eq(minvburr(k, shape1 = a, shape2 = g, scale = s),
                   s^k * gamma(a + k/g) * gamma(1 - k/g)/Ga)
            All.eq(minvpareto(k[k < 1], shape = a, scale = s),
                   s^k[k < 1] * gamma(a + k[k < 1]) * gamma(1 - k[k < 1])/Ga)
            All.eq(minvparalogis(k, shape = a, scale = s),
                   s^k * gamma(a + k/a) * gamma(1 - k/a)/Ga)
        })
    }
}

## Tests for first three positive limited moments and first two
## negative limited moments.
##
## Limits are taken from quantiles of each distribution.
order <- c(-2, -1, 1, 2, 3)          # orders
q <- c(0.25, 0.50, 0.75, 0.9, 0.95)  # quantiles
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; g <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]]
    Ga <- gamma(a)
    Gt <- gamma(t)
    for (s in scpar)
    {
        ## Reference values use the incomplete beta integral
        ## 'betaint' plus the survival term limit^k * S(limit).
        limit <- qtrbeta(q, shape1 = a, shape2 = g, shape3 = t, scale = s)
        y <- limit/s
        u <- 1/(1 + y^(-g))
        for (k in order)
            stopifnot(exprs = {
                All.eq(levtrbeta(limit, order = k,
                                 shape1 = a, shape2 = g, shape3 = t, scale = s),
                       s^k * betaint(u, t + k/g, a - k/g)/(Ga * Gt) +
                       limit^k * pbeta(u, t, a, lower = FALSE))
            })

        limit <- qburr(q, shape1 = a, shape2 = g, scale = s)
        y <- limit/s
        u <- 1/(1 + y^g)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levburr(limit, order = k,
                               shape1 = a, shape2 = g, scale = s),
                       s^k * betaint(1 - u, 1 + k/g, a - k/g)/Ga +
                       limit^k * u^a)
            })

        limit <- qllogis(q, shape = g, scale = s)
        y <- limit/s
        u <- 1/(1 + y^(-g))
        for (k in order)
            stopifnot(exprs = {
                All.eq(levllogis(limit, order = k, shape = g, scale = s),
                       s^k * betaint(u, 1 + k/g, 1 - k/g) +
                       limit^k * (1 - u))
            })

        limit <- qparalogis(q, shape = a, scale = s)
        y <- limit/s
        u <- 1/(1 + y^a)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levparalogis(limit, order = k, shape = a, scale = s),
                       s^k * betaint(1 - u, 1 + k/a, a - k/a)/Ga +
                       limit^k * u^a)
            })

        limit <- qgenpareto(q, shape1 = a, shape2 = t, scale = s)
        y <- limit/s
        u <- 1/(1 + y^(-1))
        for (k in order)
            stopifnot(exprs = {
                All.eq(levgenpareto(limit, order = k,
                                    shape1 = a, shape2 = t, scale = s),
                       s^k * betaint(u, t + k, a - k)/(Ga * Gt) +
                       limit^k * pbeta(u, t, a, lower = FALSE))
            })

        ## Pareto limited moments only exist for orders > -1.
        limit <- qpareto(q, shape = a, scale = s)
        y <- limit/s
        u <- 1/(1 + y)
        for (k in order[order > -1])
            stopifnot(exprs = {
                All.eq(levpareto(limit, order = k, shape = a, scale = s),
                       s^k * betaint(1 - u, 1 + k, a - k)/Ga +
                       limit^k * u^a)
            })

        limit <- qpareto1(q, shape = a, min = s)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levpareto1(limit, order = k, shape = a, min = s),
                       s^k * a/(a - k) - k * s^a/((a - k) * limit^(a - k)))
            })

        limit <- qinvburr(q, shape1 = a, shape2 = g, scale = s)
        y <- limit/s
        u <- 1/(1 + y^(-g))
        for (k in order)
            stopifnot(exprs = {
                All.eq(levinvburr(limit, order = k,
                                  shape1 = a, shape2 = g, scale = s),
                       s^k * betaint(u, a + k/g, 1 - k/g)/Ga +
                       limit^k * (1 - u^a))
            })

        ## Inverse Pareto limited moments only exist for orders < 1;
        ## the reference value falls back on numerical integration.
        limit <- qinvpareto(q, shape = a, scale = s)
        y <- limit/s
        u <- 1/(1 + y^(-1))
        for (k in order[order < 1])
            stopifnot(exprs = {
                All.eq(levinvpareto(limit, order = k, shape = a, scale = s),
                       s^k * a *
                       sapply(u, function(upper)
                           integrate(function(x) x^(a+k-1) * (1-x)^(-k),
                                     lower = 0, upper = upper)$value) +
                       limit^k * (1 - u^a))
            })

        limit <- qinvparalogis(q, shape = a, scale = s)
        y <- limit/s
        u <- 1/(1 + y^(-a))
        for (k in order)
            stopifnot(exprs = {
                All.eq(levinvparalogis(limit, order = k, shape = a, scale = s),
                       s^k * betaint(u, a + k/a, 1 - k/a)/Ga +
                       limit^k * (1 - u^a))
            })
    }
}

##
## TRANSFORMED GAMMA AND INVERSE TRANSFORMED GAMMA FAMILIES
##

## Density: first check that functions return 0 when scale = Inf, and
## when x = scale = Inf (transformed gamma), or when scale = 0 and
## when x = scale = 0 (inverse distributions).
stopifnot(exprs = {
    dtrgamma   (c(42, Inf), shape1 = 2, shape2 = 3, scale = Inf) == c(0, 0)
    dinvtrgamma(c(42, 0), shape1 = 2, shape2 = 3, scale = 0) == c(0, 0)
    dinvgamma  (c(42, 0), shape = 2, scale = 0) == c(0, 0)
    dinvweibull(c(42, 0), shape = 3, scale = 0) == c(0, 0)
    dinvexp    (c(42, 0), scale = 0) == c(0, 0)
})

## Tests on the density: each density evaluated at x with scale s must
## match (i) the standardized density at x/s divided by s, and (ii)
## its closed form both in x and in u = (x/s)^(+/- shape2).
set.seed(123)                    # reset the seed
nshpar <- 2                      # (maximum) number of shape parameters
shpar <- replicate(30, rgamma(nshpar, 5), simplify = FALSE)
scpar <- rlnorm(30, 2)           # scale parameters
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; t <- shpar[[c(i, 2)]]
    Ga <- gamma(a)
    for (s in scpar)
    {
        x <- rtrgamma(100, shape1 = a, shape2 = t, scale = s)
        y <- x/s
        u <- y^t
        stopifnot(exprs = {
            all.equal(d1 <- dtrgamma(x, shape1 = a, shape2 = t, scale = s),
                      d2 <- dtrgamma(y, shape1 = a, shape2 = t, scale = 1)/s,
                      tolerance = 1e-10)
            all.equal(d2, t/(Ga * s^(a * t)) * x^(a * t - 1) * exp(-u),
                      tolerance = 1e-10)
            all.equal(d1, t/(Ga * x) * u^a * exp(-u),
                      tolerance = 1e-10)
        })

        x <- rinvtrgamma(100, shape1 = a, shape2 = t, scale = s)
        y <- x/s
        u <- y^(-t)
        stopifnot(exprs = {
            all.equal(d1 <- dinvtrgamma(x, shape1 = a, shape2 = t, scale = s),
                      d2 <- dinvtrgamma(y, shape1 = a, shape2 = t, scale = 1)/s,
                      tolerance = 1e-10)
            all.equal(d2, t * s^(a * t)/(Ga * x^(a * t + 1)) * exp(-u),
                      tolerance = 1e-10)
            all.equal(d1, t/(Ga * x) * u^a * exp(-u),
                      tolerance = 1e-10)
        })

        x <- rinvgamma(100, shape = a, scale = s)
        y <- x/s
        u <- y^(-1)
        stopifnot(exprs = {
            all.equal(d1 <- dinvgamma(x, shape = a, scale = s),
                      d2 <- dinvgamma(y, shape = a, scale = 1)/s,
                      tolerance = 1e-10)
            all.equal(d2, s^a/(Ga * x^(a + 1)) * exp(-u),
                      tolerance = 1e-10)
            all.equal(d1, 1/(Ga * x) * u^a * exp(-u),
                      tolerance = 1e-10)
        })

        x <- rinvweibull(100, shape = t, scale = s)
        y <- x/s
        u <- y^(-t)
        stopifnot(exprs = {
            all.equal(d1 <- dinvweibull(x, shape = t, scale = s),
                      d2 <- dinvweibull(y, shape = t, scale = 1)/s,
                      tolerance = 1e-10)
            all.equal(d2, t * s^t/x^(t + 1) * exp(-u),
                      tolerance = 1e-10)
            all.equal(d1, t/x * u * exp(-u),
                      tolerance = 1e-10)
        })

        x <- rinvexp(100, scale = s)
        y <- x/s
        u <- y^(-1)
        stopifnot(exprs = {
            all.equal(d1 <- dinvexp(x, scale = s),
                      d2 <- dinvexp(y, scale = 1)/s,
                      tolerance = 1e-10)
            all.equal(d2, s/x^2 * exp(-u),
                      tolerance = 1e-10)
            all.equal(d1, 1/x * u * exp(-u),
                      tolerance = 1e-10)
        })
    }
}

## Tests on the cumulative distribution function.
scLrg <- c(2, 100, 1e300 * c(0.1, 1, 10, 100), 1e307, xMax, Inf)
stopifnot(exprs = {
    ptrgamma(Inf, 2, 3, scale = xMax) == 1
    ptrgamma(xMax, 2, 3, scale = xMax) == pgamma(1, 2, 1)
    ptrgamma(xMin, 2, 1, scale = 1) == pgamma(xMin, 2, 1)
    all.equal(ptrgamma(1e300, shape1 = 2, shape2 = 1, scale = scLrg,
                       log = TRUE),
              pgamma(c(5e299, 1e+298, 10, 1, 0.1, 0.01, 1e-7, 1e+300/xMax, 0),
                     2, 1, log = TRUE))
})

## For the inverse distributions a zero scale replaces the infinite
## one as the degenerate boundary case.
scLrg <- c(2, 100, 1e300 * c(0.1, 1, 10, 100), 1e307, xMax, 0)
stopifnot(exprs = {
    pinvtrgamma(Inf, 2, 3, scale = xMax) == 1
    pinvtrgamma(xMax, 2, 3, scale = xMax) == pgamma(1, 2, 1, lower = FALSE)
    pinvtrgamma(xMin, 2, 1, scale = 1) == pgamma(1/xMin, 2, 1, lower = FALSE)
    all.equal(pinvtrgamma(1e300, shape1 = 2, shape2 = 1, scale = scLrg,
                          log = TRUE),
              pgamma(c(2e-300, 1e-298, 0.1, 1, 10, 100, 1e+7, xMax/1e+300, 0),
                     2, 1, lower = FALSE, log = TRUE))
})
stopifnot(exprs = {
    pinvgamma(Inf, 2, scale = xMax) == 1
    pinvgamma(xMax, 2, scale = xMax) == pgamma(1, 2, 1, lower = FALSE)
    pinvgamma(xMin, 2, scale = 1) == pgamma(1/xMin, 2, 1, lower = FALSE)
    all.equal(pinvgamma(1e300, shape = 2, scale = scLrg, log = TRUE),
              pgamma(c(2e-300, 1e-298, 0.1, 1, 10, 100, 1e+7, xMax/1e+300, 0),
                     2, 1, lower = FALSE, log = TRUE))
})
stopifnot(exprs = {
    pinvweibull(Inf, 3, scale = xMax) == 1
    pinvweibull(xMax, 3, scale = xMax) == exp(-1)
    pinvweibull(xMin, 1, scale = 1) == exp(-1/xMin)
    all.equal(pinvweibull(1e300, shape = 1, scale = scLrg, log = TRUE),
              -c(2e-300, 1e-298, 0.1, 1, 10, 100, 1e+7, xMax/1e+300, 0))
})
stopifnot(exprs = {
    pinvexp(Inf, 3, scale = xMax) == 1
    pinvexp(xMax, 3, scale = xMax) == exp(-1)
    pinvexp(xMin, 1, scale = 1) == exp(-1/xMin)
    all.equal(pinvexp(1e300, scale = scLrg, log = TRUE),
              -c(2e-300, 1e-298, 0.1, 1, 10, 100, 1e+7, xMax/1e+300, 0))
})

## Tests for first three positive moments and first two negative
## moments. (Including for the Gamma, Weibull and Exponential
## distributions of base R.)
##
## Simulation of new parameters ensuring that said moments exist.
set.seed(123)                    # reset the seed
nshpar <- 2                      # (maximum) number of shape parameters
shpar <- replicate(30, c(3, 3) + rlnorm(nshpar, 2), simplify = FALSE)
scpar <- rlnorm(30, 2)           # scale parameters
k <- c(-2, -1, 1, 2, 3)          # orders
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; t <- shpar[[c(i, 2)]]
    Ga <- gamma(a)
    for (s in scpar)
    {
        stopifnot(exprs = {
            All.eq(mtrgamma(k, shape1 = a, shape2 = t, scale = s),
                   s^k * gamma(a + k/t)/Ga)
            All.eq(mgamma(k, shape = a, scale = s),
                   s^k * gamma(a + k)/Ga)
            All.eq(mweibull(k, shape = t, scale = s),
                   s^k * gamma(1 + k/t))
            All.eq(mexp(k[k > -1], rate = 1/s),
                   s^k[k > -1] * gamma(1 + k[k > -1]))
            All.eq(minvtrgamma(k, shape1 = a, shape2 = t, scale = s),
                   s^k * gamma(a - k/t)/Ga)
            All.eq(minvgamma(k, shape = a, scale = s),
                   s^k * gamma(a - k)/Ga)
            All.eq(minvweibull(k, shape = t, scale = s),
                   s^k * gamma(1 - k/t))
            All.eq(minvexp(k[k < 1], scale = s),
                   s^k[k < 1] * gamma(1 - k[k < 1]))
        })
    }
}

## Tests for first three positive limited moments and first two
## negative limited moments. (Including for the Gamma, Weibull and
## Exponential distributions of base R.)
##
## Limits are taken from quantiles of each distribution.
order <- c(-2, -1, 1, 2, 3)          # orders
q <- c(0.25, 0.50, 0.75, 0.9, 0.95)  # quantiles
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; t <- shpar[[c(i, 2)]]
    Ga <- gamma(a)
    for (s in scpar)
    {
        ## Limited moments of the (transformed) gamma side use the
        ## regularized incomplete gamma via pgamma; the inverse side
        ## uses the upper incomplete gamma 'gammainc'.
        limit <- qtrgamma(q, shape1 = a, shape2 = t, scale = s)
        y <- limit/s
        u <- y^t
        for (k in order)
            stopifnot(exprs = {
                All.eq(levtrgamma(limit, order = k,
                                  shape1 = a, shape2 = t, scale = s),
                       s^k * gamma(a + k/t)/Ga * pgamma(u, a + k/t, scale = 1) +
                       limit^k * pgamma(u, a, scale = 1, lower = FALSE))
            })

        limit <- qgamma(q, shape = a, scale = s)
        y <- limit/s
        for (k in order)
            stopifnot(exprs = {
                All.eq(levgamma(limit, order = k, shape = a, scale = s),
                       s^k * gamma(a + k)/Ga * pgamma(y, a + k, scale = 1) +
                       limit^k * pgamma(y, a, scale = 1, lower = FALSE))
            })

        limit <- qweibull(q, shape = t, scale = s)
        y <- limit/s
        u <- y^t
        for (k in order)
            stopifnot(exprs = {
                All.eq(levweibull(limit, order = k, shape = t, scale = s),
                       s^k * gamma(1 + k/t) * pgamma(u, 1 + k/t, scale = 1) +
                       limit^k * pgamma(u, 1, scale = 1, lower = FALSE))
            })

        ## Exponential limited moments exist for orders > -1 only.
        limit <- qexp(q, rate = 1/s)
        y <- limit/s
        for (k in order[order > -1])
            stopifnot(exprs = {
                All.eq(levexp(limit, order = k, rate = 1/s),
                       s^k * gamma(1 + k) * pgamma(y, 1 + k, scale = 1) +
                       limit^k * pgamma(y, 1, scale = 1, lower = FALSE))
            })

        limit <- qinvtrgamma(q, shape1 = a, shape2 = t, scale = s)
        y <- limit/s
        u <- y^(-t)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levinvtrgamma(limit, order = k,
                                     shape1 = a, shape2 = t, scale = s),
                       s^k * (gammainc(a - k/t, u)/Ga) +
                       limit^k * pgamma(u, a, scale = 1))
            })

        limit <- qinvgamma(q, shape = a, scale = s)
        y <- limit/s
        u <- y^(-1)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levinvgamma(limit, order = k, shape = a, scale = s),
                       s^k * (gammainc(a - k, u)/Ga) +
                       limit^k * pgamma(u, a, scale = 1))
            })

        limit <- qinvweibull(q, shape = t, scale = s)
        y <- limit/s
        u <- y^(-t)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levinvweibull(limit, order = k, shape = t, scale = s),
                       s^k * gammainc(1 - k/t, u) +
                       limit^k * (-expm1(-u)))
            })

        limit <- qinvexp(q, scale = s)
        y <- limit/s
        u <- y^(-1)
        for (k in order)
            stopifnot(exprs = {
                All.eq(levinvexp(limit, order = k, scale = s),
                       s^k * gammainc(1 - k, u) +
                       limit^k * (-expm1(-u)))
            })
    }
}

##
## OTHER DISTRIBUTIONS
##

## Distributions in this category are quite different, so let's treat
## them separately.

## LOGGAMMA

## Tests on the density.
set.seed(123)                    # reset the seed
stopifnot(exprs = {
    dlgamma(c(42, Inf), shapelog = 2, ratelog = 0) == c(0, 0)
})
assertWarning(stopifnot(exprs = {
    is.nan(dlgamma(c(0, 42, Inf), shapelog = 2, ratelog = Inf))
}))
x <- rlgamma(100, shapelog = 2, ratelog = 1)
for (a in round(rlnorm(30), 2))
{
    Ga <- gamma(a)
    for (r in round(rlnorm(30), 2))
        stopifnot(exprs = {
            All.eq(dlgamma(x, shapelog = a, ratelog = r),
                   r^a * (log(x))^(a - 1)/(Ga * x^(r + 1)))
        })
}

## Tests on the cumulative distribution function.
assertWarning(stopifnot(exprs = {
    is.nan(plgamma(Inf, 1, ratelog = Inf))
    is.nan(plgamma(Inf, Inf, ratelog = Inf))
}))
scLrg <- log(c(2, 100, 1e300 * c(0.1, 1, 10, 100), 1e307, xMax, Inf))
stopifnot(exprs = {
    plgamma(Inf, 2, ratelog = xMax) == 1
    plgamma(xMax, 2, ratelog = 0) == 0
    all.equal(plgamma(1e300, 2, ratelog = 1/scLrg, log = TRUE),
              pgamma(log(1e300), 2, scale = scLrg, log = TRUE))
})

## Tests for first three positive moments and first two negative
## moments. (Rates are shifted by 3 so that the moments exist.)
set.seed(123)                    # reset the seed
k <- c(-2, -1, 1, 2, 3)          # orders
for (a in round(rlnorm(30), 2))
{
    Ga <- gamma(a)
    for (r in 3 + round(rlnorm(30), 2))
        stopifnot(exprs = {
            All.eq(mlgamma(k, shapelog = a, ratelog = r),
                   (1 - k/r)^(-a))
        })
}

## Tests for first three positive limited moments and first two
## negative limited moments.
order <- c(-2, -1, 1, 2, 3)          # orders
q <- c(0.25, 0.50, 0.75, 0.9, 0.95)  # quantiles
for (a in round(rlnorm(30), 2))
{
    Ga <- gamma(a)
    for (r in 3 + round(rlnorm(30), 2))
    {
        limit <- qlgamma(q, shapelog = a, ratelog = r)
        for (k in order)
        {
            ## u = log(limit): the loggamma is a gamma on the log scale.
            u <- log(limit)
            stopifnot(exprs = {
                All.eq(levlgamma(limit, order = k, shapelog = a, ratelog = r),
                       (1 - k/r)^(-a) * pgamma((r - k) * u, a, scale = 1) +
                       limit^k * pgamma(r * u, a, scale = 1, lower = FALSE))
            })
        }
    }
}

## GUMBEL

## Tests on the density.
set.seed(123)                    # reset the seed
stopifnot(exprs = {
    dgumbel(c(1, 3, Inf), alpha = 2, scale = Inf) == c(0, 0, 0)
    dgumbel(c(1, 2, 3), alpha = 2, scale = 0) == c(0, Inf, 0)
    dgumbel(c(-Inf, Inf), alpha = 1, scale = 1) == c(0, 0)
    dgumbel(1, alpha = Inf, scale = 1) == 0
})
assertWarning(stopifnot(exprs = {
    is.nan(dgumbel(Inf, alpha = Inf, scale = 1))
    is.nan(dgumbel(-Inf, alpha = -Inf, scale = 1))
    is.nan(dgumbel(Inf, alpha = 1, scale = -1))
    is.nan(dgumbel(1, alpha = 1, scale = -1))
    is.nan(dgumbel(1, alpha = Inf, scale = -1))
}))
x <- rgumbel(100, alpha = 2, scale = 5)
for (a in round(rlnorm(30), 2))
{
    Ga <- gamma(a)
    for (s in round(rlnorm(30), 2))
    {
        u <- (x - a)/s
        stopifnot(exprs = {
            All.eq(dgumbel(x, alpha = a, scale = s),
                   exp(-(u + exp(-u)))/s)
        })
    }
}

## Tests on the cumulative distribution function.
assertWarning(stopifnot(exprs = {
    is.nan(pgumbel(Inf, alpha = Inf, scale = 1))
    is.nan(pgumbel(-Inf, alpha = -Inf, scale = 1))
    is.nan(pgumbel(Inf, alpha = 1, scale = -1))
    is.nan(pgumbel(1, alpha = 1, scale = -1))
    is.nan(pgumbel(1, alpha = Inf, scale = -1))
}))
scLrg <- c(2, 100, 1e300 * c(0.1, 1, 10, 100), 1e307, xMax, Inf)
stopifnot(exprs = {
    pgumbel(c(-Inf, Inf), 2, scale = xMax) == c(0, 1)
    pgumbel(c(xMin, xMax), 2, scale = 0) == c(0, 1)
    all.equal(pgumbel(1e300, 0, scale = scLrg, log = TRUE),
              -exp(-c(5e299, 1e+298, 10, 1, 0.1, 0.01, 1e-7, 1e+300/xMax, 0)))
})

## Test the first two moments, the only ones implemented.
## Orders other than 1 and 2 are not implemented and must warn/NaN.
assertWarning(stopifnot(exprs = {
    is.nan(mgumbel(c(-2, -1, 3, 4), alpha = 2, scale = 5))
}))
## Mean = alpha + scale * (Euler-Mascheroni constant); second raw
## moment = variance (pi^2 scale^2 / 6) + mean^2.
stopifnot(exprs = {
    All.eq(mgumbel(1, alpha = 2, scale = 5),
           2 + 5 * 0.577215664901532860606512090082)
    All.eq(mgumbel(2, alpha = 2, scale = 5),
           pi^2 * 25/6 + (2 + 5 * 0.577215664901532860606512090082)^2)
})

## INVERSE GAUSSIAN

## Tests on the density.
set.seed(123)                    # reset the seed
stopifnot(exprs = {
    dinvgauss(c(1, 3, Inf), mean = 2, dispersion = Inf) == c(0, 0, 0)
    dinvgauss(c(0, 42, Inf), mean = 2, dispersion = 0) == c(Inf, 0, 0)
    dinvgauss(c(0, Inf), mean = 1, dispersion = 1) == c(0, 0)
    ## infinite mean limit coincides with an inverse gamma
    dinvgauss(1, mean = Inf, dispersion = 2) == dinvgamma(1, 0.5, scale = 0.25)
})
assertWarning(stopifnot(exprs = {
    is.nan(dinvgauss(-Inf, mean = -1, dispersion = 1))
    is.nan(dinvgauss(Inf, mean = 1, dispersion = -1))
    is.nan(dinvgauss(1, mean = 1, dispersion = -1))
    is.nan(dinvgauss(1, mean = Inf, dispersion = -1))
}))
x <- rinvgauss(100, mean = 2, dispersion = 5)
for (mu in round(rlnorm(30), 2))
{
    for (phi in round(rlnorm(30), 2))
        stopifnot(exprs = {
            All.eq(dinvgauss(x, mean = mu, dispersion = phi),
                   1/sqrt(2*pi*phi*x^3) * exp(-((x/mu - 1)^2)/(2*phi*x)))
        })
}

## Tests on the cumulative distribution function.
assertWarning(stopifnot(exprs = {
    is.nan(pinvgauss(-Inf, mean = -Inf, dispersion = 1))
    is.nan(pinvgauss(Inf, mean = 1, dispersion = -1))
    is.nan(pinvgauss(1, mean = Inf, dispersion = -1))
}))
x <- c(1:50, 10^c(3:10, 20, 50, 150, 250))
sqx <- sqrt(x)
stopifnot(exprs = {
    pinvgauss(c(0, Inf), mean = 2, dispersion = xMax) == c(0, 1)
    pinvgauss(c(0, xMax), mean = xMax, dispersion = 0) == c(0, 1)
    all.equal(pinvgauss(x, 1, dispersion = 1, log = TRUE),
              log(pnorm(sqx - 1/sqx) + exp(2) * pnorm(-sqx - 1/sqx)))
})

## Tests on pinvgauss for a small (<1e-14) coefficient of variation
## squared (equal to mean * dispersion) where a gamma approximation is
## used (ported from statmod v1.4.29).
mu <- 1.5
phi <- 6e-15
cv2 <- mu * phi
## probe a tight window around the mean
q <- mu + seq.int(from = -10, to = 10, length.out = 11) * 1e-8
stopifnot(exprs = {
    identical(pinvgauss(q, mean = mu, dispersion = phi),
              exp(pgamma(q, shape = 1/cv2, scale = cv2 * mu, log.p = TRUE)))
    ## test commented out for CRAN
    ## identical(pinvgauss(q, mean = mu, dispersion = phi),
    ##           statmod::pinvgauss(q, mean = mu, dispersion = phi))
})

## Tests on qinvgauss for a small (<1e-8) coefficient of variation
## squared (equal to mean * dispersion) where a gamma approximation is
## used (ported from statmod v1.4.29).
mu <- 1.5
phi <- 6e-9
cv2 <- phi * mu
p <- seq.int(from = 0.05, to = 0.95, by = 0.1)
stopifnot(exprs = {
    identical(qinvgauss(p, mean = mu, dispersion = phi),
              mu * qgamma(log(p), shape = 1/cv2, scale = cv2, log.p = TRUE))
    ## test commented out for CRAN ('All.eq' instead of 'identical'
    ## here because the order of computations is slightly different
    ## from statmod)
    ## All.eq(qinvgauss(p, mean = mu, dispersion = phi),
    ##        statmod::qinvgauss(p, mean = mu, dispersion = phi))
})

## Tests for small value of 'shape'. Added for the patch in 4294e9c.
q <- runif(100)
stopifnot(exprs = {
    All.eq(q, pinvgauss(qinvgauss(q, 0.1, 1e-2), 0.1, 1e-2))
    All.eq(q, pinvgauss(qinvgauss(q, 0.1, 1e-6), 0.1, 1e-6))
})

## Tests for the random number generator after patches for large
## values of phi (or mu) in 7b22ec7 (ported from statmod v1.4.29).
## First, check that the new generator returns the same values as the
## old one for "safe" values: replay the historical algorithm by hand
## from an identical seed and compare draw by draw.
n <- 1000
mu <- round(rlnorm(n), 2)
phi <- round(rlnorm(n), 2)
xold <- xnew <- numeric(n)
for (i in seq_len(n))
{
    set.seed(1)
    phi0 <- phi[i] * mu[i]
    y <- rnorm(1)^2
    x <- 1 + phi0/2 * (y - sqrt(4 * y/phi0 + y^2))
    xold[i] <- mu[i] * ifelse(runif(1) <= 1/(1 + x), x, 1/x)
    set.seed(1)
    xnew[i] <- rinvgauss(1, mean = mu[i], dispersion = phi[i])
}
stopifnot(All.eq(xold, xnew))

## Next, check that the new generator returns only positive values.
stopifnot(exprs = {
    all(rinvgauss(1000, mean = 5e8, dispersion = 1) >= 0)
    all(rinvgauss(1000, mean = 1, dispersion = 5e8) >= 0)
    all(rinvgauss(1000, mean = 5e5, dispersion = 5e3) >= 0)
})

## Tests for first three positive, integer moments.
set.seed(123)                    # reset the seed
k <- 1:3
for (mu in round(rlnorm(30), 2))
{
    for (phi in round(rlnorm(30), 2))
        stopifnot(exprs = {
            All.eq(minvgauss(k, mean = mu, dispersion = phi),
                   c(mu,
                     mu^2 * (1 + phi * mu),
                     mu^3 * (1 + 3 * phi * mu + 3 * (phi * mu)^2)))
        })
}

## Tests for limited expected value.
q <- c(0.25, 0.50, 0.75, 0.9, 0.95)  # quantiles
for (mu in round(rlnorm(30), 2))
{
    for (phi in round(rlnorm(30), 2))
    {
        limit <- qinvgauss(q, mean = mu, dispersion = phi)
        stopifnot(exprs = {
            All.eq(levinvgauss(limit, mean = mu, dispersion = phi),
                   mu * (pnorm((limit/mu - 1)/sqrt(phi * limit)) -
                         exp(2/phi/mu) * pnorm(-(limit/mu + 1)/sqrt(phi * limit))) +
                   limit * pinvgauss(limit, mean = mu, dispersion = phi,
                                     lower = FALSE))
        })
    }
}

## GENERALIZED BETA

## Boundary behaviour of the density for degenerate (0 or Inf) shapes.
set.seed(123)                    # reset the seed
stopifnot(exprs = {
    dgenbeta(c(0, 2.5, 5), shape1 = 0, shape2 = 0, shape3 = 3, scale = 5) == c(Inf, 0, Inf)
    dgenbeta(c(0, 2.5, 5), shape1 = 0, shape2 = 0, shape3 = 0, scale = 5) == c(Inf, 0, Inf)
    dgenbeta(c(0, 2.5, 5), shape1 = 0, shape2 = 2, shape3 = 0, scale = 5) == c(Inf, 0, 0)
    dgenbeta(c(0, 2.5, 5), shape1 = 0, shape2 = Inf, shape3 = 3, scale = 5) == c(Inf, 0, 0)
    dgenbeta(c(0, 2.5, 5), shape1 = 1, shape2 = Inf, shape3 = 3, scale = 5) == c(Inf, 0, 0)
    dgenbeta(c(0, 2.5, 5), shape1 = Inf, shape2 = Inf, shape3 = 3, scale = 5) == c(0, Inf, 0)
    dgenbeta(c(0, 2.5, 5), shape1 = Inf, shape2 = Inf, shape3 = Inf, scale = 5) == c(0, 0, Inf)
})
nshpar <- 3                      # number of shape parameters
shpar <- replicate(30, rlnorm(nshpar, 2), simplify = FALSE)
scpar <- rlnorm(30, 2)           # scale parameters
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; b <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]]
    Be <- beta(a, b)
    for (s in scpar)
    {
        ## Build genbeta variates from beta variates through the
        ## power/scale transformation x = s * u^(1/t).
        u <- rbeta(100, a, b)
        y <- u^(1/t)
        x <- s * y
        stopifnot(exprs = {
            all.equal(d1 <- dgenbeta(x, shape1 = a, shape2 = b, shape3 = t,
                                     scale = s),
                      d2 <- dgenbeta(y, shape1 = a, shape2 = b, shape3 = t,
                                     scale = 1)/s,
                      tolerance = 1e-10)
            all.equal(d1, t * y^(a*t - 1) * (1 - y^t)^(b - 1)/(s * Be),
                      tolerance = 1e-10)
            all.equal(d1, t * u^a * (1 - u)^(b - 1)/(x * Be),
                      tolerance = 1e-10)
        })
    }
}

## Tests on the cumulative distribution function.
scLrg <- 1e300 * c(0.5, 1, 2, 4)
stopifnot(exprs = {
    all.equal(pgenbeta(1e300, shape1 = 3, shape2 = 1,
                       shape3 = rep(c(1, 2), each = length(scLrg)),
                       scale = scLrg, log = TRUE),
              c(0, pbeta(c(1, 1/2, 1/4), 3, 1, log = TRUE),
                0, pbeta(c(1, 1/4, 1/16), 3, 1, log = TRUE)))
})

## Tests for first three positive moments and first two negative
## moments.
##
## Simulation of new parameters ensuring that said moments exist.
set.seed(123)                    # reset the seed
nshpar <- 3                      # number of shape parameters
shpar <- replicate(30, sqrt(c(3, 0, 3)) + rlnorm(nshpar, 2), simplify = FALSE)
scpar <- rlnorm(30, 2)           # scale parameters
k <- c(-2, -1, 1, 2, 3)          # orders
for (i in seq_along(shpar))
{
    a <- shpar[[c(i, 1)]]; b <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]]
    Be <- beta(a, b)
    for (s in scpar)
        stopifnot(exprs = {
            All.eq(mgenbeta(k, shape1 = a, shape2 = b, shape3 = t, scale = s),
                   s^k * beta(a + k/t, b)/Be)
        })
}

## Tests for first three positive limited moments and first two
## negative limited moments.
##
## Simulation of new parameters ensuring that said moments exist.
order <- c(-2, -1, 1, 2, 3)          # orders
q <- c(0.25, 0.50, 0.75, 0.9, 0.95)  # quantiles
for (i in seq_along(shpar))
{
    ## FIX: the second shape parameter was previously assigned to an
    ## unused variable 'g', leaving 'b' (and hence 'Be') stale from
    ## the previous loop; assign it to 'b' as everywhere below.
    a <- shpar[[c(i, 1)]]; b <- shpar[[c(i, 2)]]; t <- shpar[[c(i, 3)]]
    Be <- beta(a, b)
    for (s in scpar)
    {
        limit <- qgenbeta(q, shape1 = a, shape2 = b, shape3 = t, scale = s)
        u <- (limit/s)^t
        for (k in order)
            stopifnot(exprs = {
                All.eq(levgenbeta(limit, order = k,
                                  shape1 = a, shape2 = b, shape3 = t,
                                  scale = s),
                       s^k * beta(a + k/t, b)/Be * pbeta(u, a + k/t, b) +
                       limit^k * pbeta(u, a, b, lower = FALSE))
            })
    }
}

##
## RANDOM NUMBERS (all continuous distributions)
##
set.seed(123)
n <- 20
m <- rnorm(1)

## Generate variates
Rfpareto <- rfpareto(n, min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)
Rpareto4 <- rpareto4(n, min = m, shape1 = 0.8, shape2 = 1.5, scale = 2)
Rpareto3 <- rpareto3(n, min = m, shape = 1.5, scale = 2)
Rpareto2 <- rpareto2(n, min = m, shape = 0.8, scale = 2)
Rtrbeta        <- rtrbeta      (n, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)
Rburr          <- rburr        (n, shape1 = 0.8, shape2 = 1.5, scale = 2)
Rllogis        <- rllogis      (n, shape = 1.5, scale = 2)
Rparalogis     <- rparalogis   (n, shape = 0.8, scale = 2)
Rgenpareto     <- rgenpareto   (n, shape1 = 0.8, shape2 = 2, scale = 2)
Rpareto        <- rpareto      (n, shape = 0.8, scale = 2)
Rpareto1       <- rpareto1     (n, shape = 0.8, min = 2)
Rinvburr       <- rinvburr     (n, shape1 = 1.5, shape2 = 2, scale = 2)
Rinvpareto     <- rinvpareto   (n, shape = 2, scale = 2)
Rinvparalogis  <- rinvparalogis(n, shape = 2, scale = 2)
Rtrgamma       <- rtrgamma     (n, shape1 = 2, shape2 = 3, scale = 5)
Rinvtrgamma    <- rinvtrgamma  (n, shape1 = 2, shape2 = 3, scale = 5)
Rinvgamma      <- rinvgamma    (n, shape = 2, scale = 5)
Rinvweibull    <- rinvweibull  (n, shape = 3, scale = 5)
Rinvexp        <- rinvexp      (n, scale = 5)
Rlgamma <- rlgamma(n, shapelog = 1.5, ratelog = 5)
Rgumbel <- rgumbel(n, alpha = 2, scale = 5)
Rinvgauss <- rinvgauss(n, mean = 2, dispersion = 5)
Rgenbeta <- rgenbeta(n, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)

## Compute quantiles
Pfpareto <- pfpareto(Rfpareto, min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)
Ppareto4 <- ppareto4(Rpareto4, min = m, shape1 = 0.8, shape2 = 1.5, scale = 2)
Ppareto3 <- ppareto3(Rpareto3, min = m, shape = 1.5, scale = 2)
Ppareto2 <- ppareto2(Rpareto2, min = m, shape = 0.8, scale = 2)
Ptrbeta        <- ptrbeta      (Rtrbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)
Pburr          <- pburr        (Rburr, shape1 = 0.8, shape2 = 1.5, scale = 2)
Pllogis        <- pllogis      (Rllogis, shape = 1.5, scale = 2)
Pparalogis     <- pparalogis   (Rparalogis, shape = 0.8, scale = 2)
Pgenpareto     <- pgenpareto   (Rgenpareto, shape1 = 0.8, shape2 = 2, scale = 2)
Ppareto        <- ppareto      (Rpareto, shape = 0.8, scale = 2)
Ppareto1       <- ppareto1     (Rpareto1, shape = 0.8, min = 2)
Pinvburr       <- pinvburr     (Rinvburr, shape1 = 1.5, shape2 = 2, scale = 2)
Pinvpareto     <- pinvpareto   (Rinvpareto, shape = 2, scale = 2)
Pinvparalogis  <- pinvparalogis(Rinvparalogis, shape = 2, scale = 2)
Ptrgamma       <- ptrgamma     (Rtrgamma, shape1 = 2, shape2 = 3, scale = 5)
Pinvtrgamma    <- pinvtrgamma  (Rinvtrgamma, shape1 = 2, shape2 = 3, scale = 5)
Pinvgamma      <- pinvgamma    (Rinvgamma, shape = 2, scale = 5)
Pinvweibull    <- pinvweibull  (Rinvweibull, shape = 3, scale = 5)
Pinvexp        <- pinvexp      (Rinvexp, scale = 5)
Plgamma <- plgamma(Rlgamma, shapelog = 1.5, ratelog = 5)
Pgumbel <- pgumbel(Rgumbel, alpha = 2, scale = 5)
Pinvgauss <- pinvgauss(Rinvgauss, mean = 2, dispersion = 5)
Pgenbeta <- pgenbeta(Rgenbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)

## Just compute pdf
Dfpareto <- dfpareto(Rfpareto, min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)
Dpareto4 <- dpareto4(Rpareto4, min = m, shape1 = 0.8, shape2 = 1.5, scale = 2)
Dpareto3 <- dpareto3(Rpareto3, min = m, shape = 1.5, scale = 2)
Dpareto2 <- dpareto2(Rpareto2, min = m, shape = 0.8, scale = 2)
Dtrbeta        <- dtrbeta      (Rtrbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)
Dburr          <- dburr        (Rburr, shape1 = 0.8, shape2 = 1.5, scale = 2)
Dllogis        <- dllogis      (Rllogis, shape = 1.5, scale = 2)
Dparalogis     <- dparalogis   (Rparalogis, shape = 0.8, scale = 2)
Dgenpareto     <- dgenpareto   (Rgenpareto, shape1 = 0.8, shape2 = 2, scale = 2)
Dpareto        <- dpareto      (Rpareto, shape = 0.8, scale = 2)
Dpareto1       <- dpareto1     (Rpareto1, shape = 0.8, min = 2)
Dinvburr       <- dinvburr     (Rinvburr, shape1 = 1.5, shape2 = 2, scale = 2)
Dinvpareto     <- dinvpareto   (Rinvpareto, shape = 2, scale = 2)
Dinvparalogis  <- dinvparalogis(Rinvparalogis, shape = 2, scale = 2)
Dtrgamma       <- dtrgamma     (Rtrgamma, shape1 = 2, shape2 = 3, scale = 5)
Dinvtrgamma    <- dinvtrgamma  (Rinvtrgamma, shape1 = 2, shape2 = 3, scale = 5)
## FIX: previously evaluated at 'Rinvtrgamma' (copy-paste error);
## the inverse gamma density must be evaluated at its own variates.
Dinvgamma      <- dinvgamma    (Rinvgamma, shape = 2, scale = 5)
Dinvweibull    <- dinvweibull  (Rinvweibull, shape = 3, scale = 5)
Dinvexp        <- dinvexp      (Rinvexp, scale = 5)
Dlgamma <- dlgamma(Rlgamma, shapelog = 1.5, ratelog = 5)
Dgumbel <- dgumbel(Rgumbel, alpha = 2, scale = 5)
Dinvgauss <- dinvgauss(Rinvgauss, mean = 2, dispersion = 5)
Dgenbeta <- dgenbeta(Rgenbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2)

## Check q(p(.)) identity
stopifnot(exprs = {
    All.eq(Rfpareto, qfpareto(Pfpareto, min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2))
    All.eq(Rpareto4, qpareto4(Ppareto4, min = m, shape1 = 0.8, shape2 = 1.5, scale = 2))
    All.eq(Rpareto3, qpareto3(Ppareto3, min = m, shape = 1.5, scale = 2))
    All.eq(Rpareto2, qpareto2(Ppareto2, min = m, shape = 0.8, scale = 2))
    All.eq(Rtrbeta,        qtrbeta      (Ptrbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2))
    All.eq(Rburr,          qburr        (Pburr, shape1 = 0.8, shape2 = 1.5, scale = 2))
    All.eq(Rllogis,        qllogis      (Pllogis, shape = 1.5, scale = 2))
    All.eq(Rparalogis,     qparalogis   (Pparalogis, shape = 0.8, scale = 2))
    All.eq(Rgenpareto,     qgenpareto   (Pgenpareto, shape1 = 0.8, shape2 = 2, scale = 2))
    All.eq(Rpareto,        qpareto      (Ppareto, shape = 0.8, scale = 2))
    All.eq(Rpareto1,       qpareto1     (Ppareto1, shape = 0.8, min = 2))
    All.eq(Rinvburr,       qinvburr     (Pinvburr, shape1 = 1.5, shape2 = 2, scale = 2))
    All.eq(Rinvpareto,     qinvpareto   (Pinvpareto, shape = 2, scale = 2))
    All.eq(Rinvparalogis,  qinvparalogis(Pinvparalogis, shape = 2, scale = 2))
    All.eq(Rtrgamma,       qtrgamma     (Ptrgamma, shape1 = 2, shape2 = 3, scale = 5))
    All.eq(Rinvtrgamma,    qinvtrgamma  (Pinvtrgamma, shape1 = 2, shape2 = 3, scale = 5))
    All.eq(Rinvgamma,      qinvgamma    (Pinvgamma, shape = 2, scale = 5))
    All.eq(Rinvweibull,    qinvweibull  (Pinvweibull, shape = 3, scale = 5))
    All.eq(Rinvexp,        qinvexp      (Pinvexp, scale = 5))
    All.eq(Rlgamma, qlgamma(Plgamma, shapelog = 1.5, ratelog = 5))
    All.eq(Rgumbel, qgumbel(Pgumbel, alpha = 2, scale = 5))
    All.eq(Rinvgauss, qinvgauss(Pinvgauss, mean = 2, dispersion = 5))
    All.eq(Rgenbeta, qgenbeta(Pgenbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2))
})

## Check q(p(.)) identity for special cases: the shifted families
## reduce to their unshifted counterparts after subtracting 'min'.
stopifnot(exprs = {
    All.eq(Rfpareto - m, qtrbeta(Pfpareto, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2))
    All.eq(Rpareto4 - m, qburr  (Ppareto4, shape1 = 0.8, shape2 = 1.5, scale = 2))
    All.eq(Rpareto3 - m, qllogis(Ppareto3, shape = 1.5, scale = 2))
    All.eq(Rpareto2 - m, qpareto(Ppareto2, shape = 0.8, scale = 2))
})

## Check q(p(.)) identity with upper tail
stopifnot(exprs = {
    All.eq(Rfpareto, qfpareto(1 - Pfpareto, min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, lower = FALSE))
    All.eq(Rpareto4, qpareto4(1 - Ppareto4, min = m, shape1 = 0.8, shape2 = 1.5, scale = 2, lower = FALSE))
    All.eq(Rpareto3, qpareto3(1 - Ppareto3, min = m, shape = 1.5, scale = 2, lower = FALSE))
    All.eq(Rpareto2, qpareto2(1 - Ppareto2, min = m, shape = 0.8, scale = 2, lower = FALSE))
    All.eq(Rtrbeta,        qtrbeta      (1 - Ptrbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, lower = FALSE))
    All.eq(Rburr,          qburr        (1 - Pburr, shape1 = 0.8, shape2 = 1.5, scale = 2, lower = FALSE))
    All.eq(Rllogis,        qllogis      (1 - Pllogis, shape = 1.5, scale = 2, lower = FALSE))
    All.eq(Rparalogis,     qparalogis   (1 - Pparalogis, shape = 0.8, scale = 2, lower = FALSE))
    All.eq(Rgenpareto,     qgenpareto   (1 - Pgenpareto, shape1 = 0.8, shape2 = 2, scale = 2, lower = FALSE))
    All.eq(Rpareto,        qpareto      (1 - Ppareto, shape = 0.8, scale = 2, lower = FALSE))
    All.eq(Rpareto1,       qpareto1     (1 - Ppareto1, shape = 0.8, min = 2, lower = FALSE))
    All.eq(Rinvburr,       qinvburr     (1 - Pinvburr, shape1 = 1.5, shape2 = 2, scale = 2, lower = FALSE))
    All.eq(Rinvpareto,     qinvpareto   (1 - Pinvpareto, shape = 2, scale = 2, lower = FALSE))
    All.eq(Rinvparalogis,  qinvparalogis(1 - Pinvparalogis, shape = 2, scale = 2, lower = FALSE))
    All.eq(Rtrgamma,       qtrgamma     (1 - Ptrgamma, shape1 = 2, shape2 = 3, scale = 5, lower = FALSE))
    All.eq(Rinvtrgamma,    qinvtrgamma  (1 - Pinvtrgamma, shape1 = 2, shape2 = 3, scale = 5, lower = FALSE))
    All.eq(Rinvgamma,      qinvgamma    (1 - Pinvgamma, shape = 2, scale = 5, lower = FALSE))
    All.eq(Rinvweibull,    qinvweibull  (1 - Pinvweibull, shape = 3, scale = 5, lower = FALSE))
    All.eq(Rinvexp,        qinvexp      (1 - Pinvexp, scale = 5, lower = FALSE))
    All.eq(Rlgamma, qlgamma(1 - Plgamma, shapelog = 1.5, ratelog = 5, lower = FALSE))
    All.eq(Rgumbel, qgumbel(1 - Pgumbel, alpha = 2, scale = 5, lower = FALSE))
    All.eq(Rinvgauss, qinvgauss(1 - Pinvgauss, mean = 2, dispersion = 5, lower = FALSE))
    All.eq(Rgenbeta, qgenbeta(1 - Pgenbeta, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, lower = FALSE))
})

## Check q(p(., log), log) identity
stopifnot(exprs = {
    All.eq(Rfpareto, qfpareto(log(Pfpareto), min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, log = TRUE))
    All.eq(Rpareto4, qpareto4(log(Ppareto4), min = m, shape1 = 0.8, shape2 = 1.5, scale = 2, log = TRUE))
    All.eq(Rpareto3, qpareto3(log(Ppareto3), min = m, shape = 1.5, scale = 2, log = TRUE))
    All.eq(Rpareto2, qpareto2(log(Ppareto2), min = m, shape = 0.8, scale = 2, log = TRUE))
    All.eq(Rtrbeta,        qtrbeta      (log(Ptrbeta), shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, log = TRUE))
    All.eq(Rburr,          qburr        (log(Pburr), shape1 = 0.8, shape2 = 1.5, scale = 2, log = TRUE))
    All.eq(Rllogis,        qllogis      (log(Pllogis), shape = 1.5, scale = 2, log = TRUE))
    All.eq(Rparalogis,     qparalogis   (log(Pparalogis), shape = 0.8, scale = 2, log = TRUE))
    All.eq(Rgenpareto,     qgenpareto   (log(Pgenpareto), shape1 = 0.8, shape2 = 2, scale = 2, log = TRUE))
    All.eq(Rpareto,        qpareto      (log(Ppareto), shape = 0.8, scale = 2, log = TRUE))
    All.eq(Rpareto1,       qpareto1     (log(Ppareto1), shape = 0.8, min = 2, log = TRUE))
    All.eq(Rinvburr,       qinvburr     (log(Pinvburr), shape1 = 1.5, shape2 = 2, scale = 2, log = TRUE))
    All.eq(Rinvpareto,     qinvpareto   (log(Pinvpareto), shape = 2, scale = 2, log = TRUE))
    All.eq(Rinvparalogis,  qinvparalogis(log(Pinvparalogis), shape = 2, scale = 2, log = TRUE))
    All.eq(Rtrgamma,       qtrgamma     (log(Ptrgamma), shape1 = 2, shape2 = 3, scale = 5, log = TRUE))
    All.eq(Rinvtrgamma,    qinvtrgamma  (log(Pinvtrgamma), shape1 = 2, shape2 = 3, scale = 5, log = TRUE))
    All.eq(Rinvgamma,      qinvgamma    (log(Pinvgamma), shape = 2, scale = 5, log = TRUE))
    All.eq(Rinvweibull,    qinvweibull  (log(Pinvweibull), shape = 3, scale = 5, log = TRUE))
    All.eq(Rinvexp,        qinvexp      (log(Pinvexp), scale = 5, log = TRUE))
    All.eq(Rlgamma, qlgamma(log(Plgamma), shapelog = 1.5, ratelog = 5, log = TRUE))
    All.eq(Rgumbel, qgumbel(log(Pgumbel), alpha = 2, scale = 5, log = TRUE))
    All.eq(Rinvgauss, qinvgauss(log(Pinvgauss), mean = 2, dispersion = 5, log = TRUE))
    All.eq(Rgenbeta, qgenbeta(log(Pgenbeta), shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, log = TRUE))
})

## Check q(p(., log), log) identity with upper tail
stopifnot(exprs = {
    All.eq(Rfpareto, qfpareto(log1p(-Pfpareto), min = m, shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rpareto4, qpareto4(log1p(-Ppareto4), min = m, shape1 = 0.8, shape2 = 1.5, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rpareto3, qpareto3(log1p(-Ppareto3), min = m, shape = 1.5, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rpareto2, qpareto2(log1p(-Ppareto2), min = m, shape = 0.8, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rtrbeta,        qtrbeta      (log1p(-Ptrbeta), shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rburr,          qburr        (log1p(-Pburr), shape1 = 0.8, shape2 = 1.5, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rllogis,        qllogis      (log1p(-Pllogis), shape = 1.5, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rparalogis,     qparalogis   (log1p(-Pparalogis), shape = 0.8, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rgenpareto,     qgenpareto   (log1p(-Pgenpareto), shape1 = 0.8, shape2 = 2, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rpareto,        qpareto      (log1p(-Ppareto), shape = 0.8, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rpareto1,       qpareto1     (log1p(-Ppareto1), shape = 0.8, min = 2, lower = FALSE, log = TRUE))
    All.eq(Rinvburr,       qinvburr     (log1p(-Pinvburr), shape1 = 1.5, shape2 = 2, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rinvpareto,     qinvpareto   (log1p(-Pinvpareto), shape = 2, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rinvparalogis,  qinvparalogis(log1p(-Pinvparalogis), shape = 2, scale = 2, lower = FALSE, log = TRUE))
    All.eq(Rtrgamma,       qtrgamma     (log1p(-Ptrgamma), shape1 = 2, shape2 = 3, scale = 5, lower = FALSE, log = TRUE))
    All.eq(Rinvtrgamma,    qinvtrgamma  (log1p(-Pinvtrgamma), shape1 = 2, shape2 = 3, scale = 5, lower = FALSE, log = TRUE))
    All.eq(Rinvgamma,      qinvgamma    (log1p(-Pinvgamma), shape = 2, scale = 5, lower = FALSE, log = TRUE))
    All.eq(Rinvweibull,    qinvweibull  (log1p(-Pinvweibull), shape = 3, scale = 5, lower = FALSE, log = TRUE))
    All.eq(Rinvexp,        qinvexp      (log1p(-Pinvexp), scale = 5, lower = FALSE, log = TRUE))
    All.eq(Rlgamma, qlgamma(log1p(-Plgamma), shapelog = 1.5, ratelog = 5, lower = FALSE, log = TRUE))
    All.eq(Rgumbel, qgumbel(log1p(-Pgumbel), alpha = 2, scale = 5, lower = FALSE, log = TRUE))
    All.eq(Rinvgauss, qinvgauss(log1p(-Pinvgauss), mean = 2, dispersion = 5, lower = FALSE, log = TRUE))
    All.eq(Rgenbeta, qgenbeta(log1p(-Pgenbeta), shape1 = 0.8, shape2 = 1.5, shape3 = 2, scale = 2, lower = FALSE, log = TRUE))
})

###
### DISCRETE DISTRIBUTIONS
###

## Reset seed
set.seed(123)

## Define a small function to compute probabilities for the (a, b, 1)
## family of discrete distributions using the recursive relation
##
## p[k] = (a + b/k)p[k - 1], k = 2, 3, ...
## ## for a, b and p[1] given. dab1 <- function(x, a, b, p1) { x <- floor(x) if (x < 1) stop("recursive computations possible for x >= 2 only") for (k in seq(2, length.out = x - 1)) { p2 <- (a + b/k) * p1 p1 <- p2 } p1 } ## ZERO-TRUNCATED (a, b, 1) CLASS ## Tests on the probability mass function: ## ## 1. probability is 0 at x = 0; ## 2. pmf satisfies the recursive relation ## 3. limiting cases have mass 1 at x = 1 ## 4. logarithmic is a special case of ztnbinom lambda <- rlnorm(30, 2) # Poisson parameters r <- lambda # size for negative binomial prob <- runif(30) # probs size <- round(lambda) # size for binomial x <- sapply(size, sample, size = 1) stopifnot(exprs = { dztpois(0, lambda) == 0 dztnbinom(0, r, prob) == 0 dztgeom(0, prob) == 0 dztbinom(0, size, prob) == 0 dlogarithmic(0, prob) == 0 }) stopifnot(exprs = { All.eq(dztpois(x, lambda), mapply(dab1, x, a = 0, b = lambda, p1 = lambda/(exp(lambda) - 1))) All.eq(dztnbinom(x, r, prob), mapply(dab1, x, a = 1 - prob, b = (r - 1) * (1 - prob), p1 = r * prob^r * (1 - prob)/(1 - prob^r))) All.eq(dztgeom(x, prob), mapply(dab1, x, a = 1 - prob, b = 0, p1 = prob)) All.eq(dztbinom(x, size, prob), mapply(dab1, x, a = -prob/(1 - prob), b = (size + 1) * prob/(1 - prob), p1 = size * prob * (1 - prob)^(size - 1)/(1 - (1 - prob)^size))) All.eq(dlogarithmic(x, prob), mapply(dab1, x, a = prob, b = -prob, p1 = -prob/log1p(-prob))) }) p01 <- as.numeric(x == 1) stopifnot(exprs = { dztpois(x, 0) == p01 dztnbinom(x, r, 1) == p01 dztgeom(x, 1) == p01 dztbinom(x, 1, prob) == p01 dztbinom(x, size, 0) == p01 dlogarithmic(x, 0) == p01 }) stopifnot(exprs = { dztnbinom(x, 0, prob) == dlogarithmic(x, 1 - prob) }) ## Tests on the cumulative distribution function. ## ## 1. cdf is equal to cumulative sum of pdf ## 2. limiting cases have mass 1 at x = 1 ## 3. 
logarithmic is a special case of ztnbinom for (l in lambda) stopifnot(exprs = { all.equal(cumsum(dztpois(0:20, l)), pztpois(0:20, l), tolerance = 1e-8) }) for (i in seq_along(r)) stopifnot(exprs = { all.equal(cumsum(dztnbinom(0:20, r[i], prob[i])), pztnbinom(0:20, r[i], prob[i]), tolerance = 1e-8) }) for (i in seq_along(r)) stopifnot(exprs = { all.equal(cumsum(dztgeom(0:20, prob[i])), pztgeom(0:20, prob[i]), tolerance = 1e-8) }) for (i in seq_along(size)) stopifnot(exprs = { all.equal(cumsum(dztbinom(0:20, size[i], prob[i])), pztbinom(0:20, size[i], prob[i]), tolerance = 1e-8) }) for (p in prob) stopifnot(exprs = { all.equal(cumsum(dlogarithmic(0:20, p)), plogarithmic(0:20, p), tolerance = 1e-8) }) p01 <- as.numeric(x >= 1) stopifnot(exprs = { pztpois(x, 0) == p01 pztnbinom(x, r, 1) == p01 pztgeom(x, 1) == p01 pztbinom(x, 1, prob) == p01 pztbinom(x, size, 0) == p01 plogarithmic(x, 0) == p01 }) stopifnot(exprs = { All.eq(pztnbinom(x, 0, prob), plogarithmic(x, 1 - prob)) }) ## Tests on the limiting cases of the quantile function. 
p <- c(0, runif(20), 1) prob <- 0.2 stopifnot(exprs = { qztpois(p, lambda = 0, TRUE, FALSE) == 1 qztpois(p, lambda = 0, FALSE, FALSE) == 1 qztpois(log(p), lambda = 0, TRUE, TRUE) == 1 qztpois(log(p), lambda = 0, FALSE, TRUE) == 1 qztnbinom(p, 5, prob = 1, TRUE, FALSE) == 1 qztnbinom(p, 5, prob = 1, FALSE, FALSE) == 1 qztnbinom(log(p), 5, prob = 1, TRUE, TRUE) == 1 qztnbinom(log(p), 5, prob = 1, FALSE, TRUE) == 1 qztnbinom(p, size = 0, prob, TRUE, FALSE) == qlogarithmic(p, 1 - prob, TRUE, FALSE) qztnbinom(p, size = 0, prob, FALSE, FALSE) == qlogarithmic(p, 1 - prob, FALSE, FALSE) qztnbinom(log(p), size = 0, prob, TRUE, TRUE) == qlogarithmic(log(p), 1 - prob, TRUE, TRUE) qztnbinom(log(p), size = 0, prob, FALSE, TRUE) == qlogarithmic(log(p), 1 - prob, FALSE, TRUE) qztgeom(p, prob = 1, TRUE, FALSE) == 1 qztgeom(p, prob = 1, FALSE, FALSE) == 1 qztgeom(log(p), prob = 1, TRUE, TRUE) == 1 qztgeom(log(p), prob = 1, FALSE, TRUE) == 1 qztbinom(p, size = 1, 0.5, TRUE, FALSE) == 1 qztbinom(p, size = 1, 0.5, FALSE, FALSE) == 1 qztbinom(log(p), size = 1, 0.5, TRUE, TRUE) == 1 qztbinom(log(p), size = 1, 0.5, FALSE, TRUE) == 1 qztbinom(p, 5, prob = 0, TRUE, FALSE) == 1 qztbinom(p, 5, prob = 0, FALSE, FALSE) == 1 qztbinom(log(p), 5, prob = 0, TRUE, TRUE) == 1 qztbinom(log(p), 5, prob = 0, FALSE, TRUE) == 1 qlogarithmic(p, prob = 0, TRUE, FALSE) == 1 qlogarithmic(p, prob = 0, FALSE, FALSE) == 1 qlogarithmic(log(p), prob = 0, TRUE, TRUE) == 1 qlogarithmic(log(p), prob = 0, FALSE, TRUE) == 1 }) ## Tests at the extremes of the quantile function. 
p <- c(0, 1) q <- c(1, Inf) lower <- rep(c(TRUE, FALSE), each = 2) stopifnot(exprs = { qztpois(p, 1, lower, FALSE) == q qztpois(log(p), 1, lower, TRUE) == q qztnbinom(p, 5, 0.5, lower, FALSE) == q qztnbinom(log(p), 5, 0.5, lower, TRUE) == q qztgeom(p, 0.5, lower, FALSE) == q qztgeom(log(p), 0.5, lower, TRUE) == q qztbinom(p, 5, 0.5, lower, FALSE) == q qztbinom(log(p), 5, 0.5, lower, TRUE) == q qlogarithmic(p, 0.5, lower, FALSE) == q qlogarithmic(log(p), 0.5, lower, TRUE) == q }) ## ZERO-MODIFIED (a, b, 1) CLASS ## Tests on the probability mass function: ## ## 1. probability is p0 at x = 0 ## 2. pmf satisfies the recursive relation ## 3. limiting cases have mass 1 at x = 1 ## 4. zmlogarithmic is a special case of zmnbinom ## 5. zm* with p0 = 0 is equal to zt* lambda <- rlnorm(30, 2) # Poisson parameters r <- lambda # size for negative binomial prob <- runif(30) # probs size <- round(lambda) # size for binomial p0 <- c(0, runif(28), 1) # probs at 0 x <- sapply(size, sample, size = 1) stopifnot(exprs = { dzmpois(0, lambda, p0) == p0 dzmnbinom(0, r, prob, p0) == p0 dzmgeom(0, prob, p0) == p0 dzmbinom(0, size, prob, p0) == p0 dzmlogarithmic(0, prob, p0) == p0 }) stopifnot(exprs = { All.eq(dzmpois(x, lambda, p0), mapply(dab1, x, a = 0, b = lambda, p1 = (1 - p0) *lambda/(exp(lambda) - 1))) All.eq(dzmnbinom(x, r, prob, p0), mapply(dab1, x, a = 1 - prob, b = (r - 1) * (1 - prob), p1 = (1 - p0) * r * prob^r * (1 - prob)/(1 - prob^r))) All.eq(dzmgeom(x, prob, p0), mapply(dab1, x, a = 1 - prob, b = 0, p1 = (1 - p0) * prob)) All.eq(dzmbinom(x, size, prob, p0), mapply(dab1, x, a = -prob/(1 - prob), b = (size + 1) * prob/(1 - prob), p1 = (1 - p0) * size * prob * (1 - prob)^(size - 1)/(1 - (1 - prob)^size))) All.eq(dzmlogarithmic(x, prob, p0), mapply(dab1, x, a = prob, b = -prob, p1 = -(1 - p0) * prob/log1p(-prob))) }) p01 <- p0 * (x == 0) + (1 - p0) * (x == 1) stopifnot(exprs = { dzmpois(x, 0, p0) == p01 dzmnbinom(x, r, 1, p0) == p01 dzmgeom(x, 1, p0) == p01 dzmbinom(x, 1, prob, 
p0) == p01 dzmbinom(x, size, 0, p0) == p01 dzmlogarithmic(x, 0, p0) == p01 }) stopifnot(exprs = { dzmnbinom(x, 0, prob, p0) == dzmlogarithmic(x, 1 - prob, p0) }) stopifnot(exprs = { dzmpois(x, lambda, 0) == dztpois(x, lambda) dzmnbinom(x, r, prob, 0) == dztnbinom(x, r, prob) dzmgeom(x, prob, 0) == dztgeom(x, prob) dzmbinom(x, size, prob, 0) == dztbinom(x, size, prob) dzmlogarithmic(x, prob, 0) == dlogarithmic(x, prob) }) ## Tests on cumulative distribution function. ## ## 1. cdf is equal to cumulative sum of pdf ## 2. limiting cases have mass 1 at x = 1 ## 3. zmlogarithmic is a special case of zmnbinom ## 4. zm* with p0 = 0 is equal to zt* for (i in seq_along(lambda)) stopifnot(exprs = { all.equal(cumsum(dzmpois(0:20, lambda[i], p0 = p0[i])), pzmpois(0:20, lambda[i], p0 = p0[i]), tolerance = 1e-8) }) for (i in seq_along(r)) stopifnot(exprs = { all.equal(cumsum(dzmnbinom(0:20, r[i], prob[i], p0[i])), pzmnbinom(0:20, r[i], prob[i], p0[i]), tolerance = 1e-8) }) for (i in seq_along(r)) stopifnot(exprs = { all.equal(cumsum(dzmgeom(0:20, prob[i], p0[i])), pzmgeom(0:20, prob[i], p0[i]), tolerance = 1e-8) }) for (i in seq_along(size)) stopifnot(exprs = { all.equal(cumsum(dzmbinom(0:20, size[i], prob[i], p0[i])), pzmbinom(0:20, size[i], prob[i], p0[i]), tolerance = 1e-8) }) for (i in seq_along(prob)) stopifnot(exprs = { all.equal(cumsum(dzmlogarithmic(0:20, prob[i], p0[i])), pzmlogarithmic(0:20, prob[i], p0[i]), tolerance = 1e-8) }) p01 <- p0 * (x < 1) + (x >= 1) stopifnot(exprs = { pzmpois(x, 0, p0) == p01 pzmnbinom(x, r, 1, p0) == p01 pzmgeom(x, 1, p0) == p01 pzmbinom(x, 1, prob, p0) == p01 pzmbinom(x, size, 0, p0) == p01 pzmlogarithmic(x, 0, p0) == p01 }) stopifnot(exprs = { All.eq(pzmnbinom(x, 0, prob, p0), pzmlogarithmic(x, 1 - prob, p0)) }) stopifnot(exprs = { All.eq(pzmpois(x, lambda, 0), pztpois(x, lambda)) All.eq(pzmnbinom(x, r, prob, 0), pztnbinom(x, r, prob)) All.eq(pzmgeom(x, prob, 0), pztgeom(x, prob)) All.eq(pzmbinom(x, size, prob, 0), pztbinom(x, size, prob)) 
All.eq(pzmlogarithmic(0, prob, 0), plogarithmic(0, prob)) }) ## Tests on the limiting cases of the quantile function. p <- c(0, runif(20), 1) p0m <- 0.2 stopifnot(exprs = { qzmpois(p, lambda = 0, p0m, TRUE, FALSE) == 1 - (p <= p0m) qzmpois(p, lambda = 0, p0m, FALSE, FALSE) == 1 - (1 - p <= p0m) qzmpois(log(p), lambda = 0, p0m, TRUE, TRUE) == 1 - (p <= p0m) qzmpois(log(p), lambda = 0, p0m, FALSE, TRUE) == 1 - (1 - p <= p0m) qzmnbinom(p, size = 5, 1, p0m, TRUE, FALSE) == 1 - (p <= p0m) qzmnbinom(p, size = 5, 1, p0m, FALSE, FALSE) == 1 - (1 - p <= p0m) qzmnbinom(log(p), size = 5, 1, p0m, TRUE, TRUE) == 1 - (p <= p0m) qzmnbinom(log(p), size = 5, 1, p0m, FALSE, TRUE) == 1 - (1 - p <= p0m) qzmnbinom(p, size = 0, prob, p0m, TRUE, FALSE) == qzmlogarithmic(p, 1 - prob, p0m, TRUE, FALSE) qzmnbinom(p, size = 0, prob, p0m, FALSE, FALSE) == qzmlogarithmic(p, 1 - prob, p0m, FALSE, FALSE) qzmnbinom(log(p), size = 0, prob, p0m, TRUE, TRUE) == qzmlogarithmic(log(p), 1 - prob, p0m, TRUE, TRUE) qzmnbinom(log(p), size = 0, prob, p0m, FALSE, TRUE) == qzmlogarithmic(log(p), 1 - prob, p0m, FALSE, TRUE) qzmgeom(p, 1, p0m, TRUE, FALSE) == 1 - (p <= p0m) qzmgeom(p, 1, p0m, FALSE, FALSE) == 1 - (1 - p <= p0m) qzmgeom(log(p), 1, p0m, TRUE, TRUE) == 1 - (p <= p0m) qzmgeom(log(p), 1, p0m, FALSE, TRUE) == 1 - (1 - p <= p0m) qzmbinom(p, size = 1, 0.5, p0m, TRUE, FALSE) == 1 - (p <= p0m) qzmbinom(p, size = 1, 0.5, p0m, FALSE, FALSE) == 1 - (1 - p <= p0m) qzmbinom(log(p), size = 1, 0.5, p0m, TRUE, TRUE) == 1 - (p <= p0m) qzmbinom(log(p), size = 1, 0.5, p0m, FALSE, TRUE) == 1 - (1 - p <= p0m) qzmbinom(p, 5, prob = 0, p0m, TRUE, FALSE) == 1 - (p <= p0m) qzmbinom(p, 5, prob = 0, p0m, FALSE, FALSE) == 1 - (1 - p <= p0m) qzmbinom(log(p), 5, prob = 0, p0m, TRUE, TRUE) == 1 - (p <= p0m) qzmbinom(log(p), 5, prob = 0, p0m, FALSE, TRUE) == 1 - (1 - p <= p0m) qzmlogarithmic(p, 0, p0m, TRUE, FALSE) == 1 - (p <= p0m) qzmlogarithmic(p, 0, p0m, FALSE, FALSE) == 1 - (1 - p <= p0m) qzmlogarithmic(log(p), 0, p0m, 
TRUE, TRUE) == 1 - (p <= p0m) qzmlogarithmic(log(p), 0, p0m, FALSE, TRUE) == 1 - (1 - p <= p0m) }) ## Tests with p0 = 0 at 0 stopifnot(exprs = { qzmpois(0, 2, p0 = 0, TRUE, FALSE) == 1 qzmpois(1, 2, p0 = 0, FALSE, FALSE) == 1 qzmpois(-Inf, 2, p0 = 0, TRUE, TRUE) == 1 qzmpois(0, 2, p0 = 0, FALSE, TRUE) == 1 qzmnbinom(0, 7, prob = 0.8, p0 = 0, TRUE, FALSE) == 1 qzmnbinom(1, 7, prob = 0.8, p0 = 0, FALSE, FALSE) == 1 qzmnbinom(-Inf, 7, prob = 0.8, p0 = 0, TRUE, TRUE) == 1 qzmnbinom(0, 7, prob = 0.8, p0 = 0, FALSE, TRUE) == 1 qzmgeom(0, 0.8, p0 = 0, TRUE, FALSE) == 1 qzmgeom(1, 0.8, p0 = 0, FALSE, FALSE) == 1 qzmgeom(-Inf, 0.8, p0 = 0, TRUE, TRUE) == 1 qzmgeom(0, 0.8, p0 = 0, FALSE, TRUE) == 1 qzmbinom(0, 5, 0.5, p0 = 0, TRUE, FALSE) == 1 qzmbinom(1, 5, 0.5, p0 = 0, FALSE, FALSE) == 1 qzmbinom(-Inf, 5, 0.5, p0 = 0, TRUE, TRUE) == 1 qzmbinom(0, 5, 0.5, p0 = 0, FALSE, TRUE) == 1 qzmlogarithmic(0, 0.9, p0 = 0, TRUE, FALSE) == 1 qzmlogarithmic(1, 0.9, p0 = 0, FALSE, FALSE) == 1 qzmlogarithmic(-Inf, 0.9, p0 = 0, TRUE, TRUE) == 1 qzmlogarithmic(0, 0.9, p0 = 0, FALSE, TRUE) == 1 }) ## Tests with p0 = 1 ('lower.tail' and 'log.p' without effect) stopifnot(exprs = { qzmpois (p, 2, p0 = 1) == 0 qzmnbinom (p, 7, 0.8, p0 = 1) == 0 qzmgeom (p, 0.8, p0 = 1) == 0 qzmbinom (p, 5, 0.5, p0 = 1) == 0 qzmlogarithmic(p, 0.9, p0 = 1) == 0 }) ## Tests at the extremes of the quantile function. 
p <- c(0, 1) q <- c(0, Inf) stopifnot(exprs = { qzmpois(p, lambda = 2, p0m, log.p = FALSE) == q qzmpois(log(p), lambda = 2, p0m, log.p = TRUE) == q qzmbinom(p, size = 5, prob = 0.5, p0m, log.p = FALSE) == q qzmbinom(log(p), size = 5, prob = 0.5, p0m, log.p = TRUE) == q qzmgeom(p, prob = 0.5, p0m, log.p = FALSE) == q qzmgeom(log(p), prob = 0.5, p0m, log.p = TRUE) == q qzmnbinom(p, size = 5, prob = 0.5, p0m, log.p = FALSE) == q qzmnbinom(log(p), size = 5, prob = 0.5, p0m, log.p = TRUE) == q qzmlogarithmic(p, prob = 0.5, p0m, log.p = FALSE) == q qzmlogarithmic(log(p), prob = 0.5, p0m, log.p = TRUE) == q }) ## POISSON-INVERSE GAUSSIAN ## Reset seed set.seed(123) ## Define a small function to compute probabilities for the PIG ## directly using the Bessel function. dpigBK <- function(x, mu, phi) { M_LN2 <- 0.693147180559945309417232121458 M_SQRT_2dPI <- 0.225791352644727432363097614947 phimu <- phi * mu lphi <- log(phi) y <- x - 0.5 logA = -lphi/2 - M_SQRT_2dPI logB = (M_LN2 + lphi + log1p(1/(2 * phimu * mu)))/2; exp(logA + 1/phimu - lfactorial(x) - y * logB) * besselK(exp(logB - lphi), y) } ## Tests on the probability mass function. mu <- rlnorm(30, 2) phi <- rlnorm(30, 2) x <- 0:100 for (i in seq_along(phi)) { stopifnot(exprs = { all.equal(dpoisinvgauss(x, mean = mu[i], dispersion = phi[i]), dpigBK(x, mu[i], phi[i])) all.equal(dpoisinvgauss(x, mean = Inf, dispersion = phi[i]), dpigBK(x, Inf, phi[i])) }) } ## Tests on cumulative distribution function. for (i in seq_along(phi)) stopifnot(exprs = { all.equal(cumsum(dpoisinvgauss(0:20, mu[i], phi[i])), ppoisinvgauss(0:20, mu[i], phi[i]), tolerance = 1e-8) all.equal(cumsum(dpoisinvgauss(0:20, Inf, phi[i])), ppoisinvgauss(0:20, Inf, phi[i]), tolerance = 1e-8) }) ## ## RANDOM NUMBERS (all discrete distributions) ## set.seed(123) n <- 20 ## Generate variates. ## ## For zero-modified distributions, we simulate two sets of values: ## one with p0m < p0 (suffix 'p0lt') and one with p0m > p0 (suffix ## 'p0gt'). 
Rztpois <- sort(unique( rztpois(n, lambda = 12))) Rztnbinom <- sort(unique( rztnbinom(n, size = 7, prob = 0.01))) Rztgeom <- sort(unique( rztgeom(n, prob = pi/16))) Rztbinom <- sort(unique( rztbinom(n, size = 55, prob = pi/16))) Rlogarithmic <- sort(unique( rlogarithmic(n, prob = 0.99))) Rzmpoisp0lt <- sort(unique( rzmpois(n, lambda = 6, p0 = 0.001))) Rzmpoisp0gt <- sort(unique( rzmpois(n, lambda = 6, p0 = 0.010))) Rzmnbinomp0lt <- sort(unique( rzmnbinom(n, size = 7, prob = 0.8, p0 = 0.01))) Rzmnbinomp0gt <- sort(unique( rzmnbinom(n, size = 7, prob = 0.8, p0 = 0.40))) Rzmgeomp0lt <- sort(unique( rzmgeom(n, prob = pi/16, p0 = 0.01))) Rzmgeomp0gt <- sort(unique( rzmgeom(n, prob = pi/16, p0 = 0.40))) Rzmbinomp0lt <- sort(unique( rzmbinom(n, size = 12, prob = pi/16, p0 = 0.01))) Rzmbinomp0gt <- sort(unique( rzmbinom(n, size = 12, prob = pi/16, p0 = 0.12))) Rzmlogarithmicp0lt <- sort(unique( rzmlogarithmic(n, prob = 0.99, p0 = 0.05))) Rzmlogarithmicp0gt <- sort(unique( rzmlogarithmic(n, prob = 0.99, p0 = 0.55))) Rpoisinvgauss <- sort(unique( rpoisinvgauss(n, mean = 12, dispersion = 0.1))) RpoisinvgaussInf <- sort(unique( rpoisinvgauss(n, mean = Inf, dispersion = 1.1))) ## Compute quantiles Pztpois <- pztpois (Rztpois, lambda = 12) Pztnbinom <- pztnbinom (Rztnbinom, size = 7, prob = 0.01) Pztgeom <- pztgeom (Rztgeom, prob = pi/16) Pztbinom <- pztbinom (Rztbinom, size = 55, prob = pi/16) Plogarithmic <- plogarithmic(Rlogarithmic, prob = 0.99) Pzmpoisp0lt <- pzmpois (Rzmpoisp0lt, lambda = 6, p0 = 0.001) Pzmpoisp0gt <- pzmpois (Rzmpoisp0gt, lambda = 6, p0 = 0.010) Pzmnbinomp0lt <- pzmnbinom (Rzmnbinomp0lt, size = 7, prob = 0.8, p0 = 0.01) Pzmnbinomp0gt <- pzmnbinom (Rzmnbinomp0gt, size = 7, prob = 0.8, p0 = 0.40) Pzmgeomp0lt <- pzmgeom (Rzmgeomp0lt, prob = pi/16, p0 = 0.01) Pzmgeomp0gt <- pzmgeom (Rzmgeomp0gt, prob = pi/16, p0 = 0.40) Pzmbinomp0lt <- pzmbinom (Rzmbinomp0lt, size = 12, prob = pi/16, p0 = 0.01) Pzmbinomp0gt <- pzmbinom (Rzmbinomp0gt, size = 12, prob = pi/16, 
p0 = 0.12) Pzmlogarithmicp0lt <- pzmlogarithmic(Rzmlogarithmicp0lt, prob = 0.99, p0 = 0.05) Pzmlogarithmicp0gt <- pzmlogarithmic(Rzmlogarithmicp0gt, prob = 0.99, p0 = 0.55) Ppoisinvgauss <- ppoisinvgauss(Rpoisinvgauss, mean = 12, dispersion = 0.1) PpoisinvgaussInf <- ppoisinvgauss(RpoisinvgaussInf, mean = Inf, dispersion = 1.1) ## Just compute pmf Dztpois <- dztpois (Rztpois, lambda = 12) Dztnbinom <- dztnbinom (Rztnbinom, size = 7, prob = 0.01) Dztgeom <- dztgeom (Rztgeom, prob = pi/16) Dztbinom <- dztbinom (Rztbinom, size = 55, prob = pi/16) Dlogarithmic <- dlogarithmic(Rlogarithmic, prob = pi/16) Dzmpoisp0lt <- dzmpois (Rzmpoisp0lt, lambda = 6, p0 = 0.001) Dzmpoisp0gt <- dzmpois (Rzmpoisp0gt, lambda = 6, p0 = 0.010) Dzmnbinomp0lt <- dzmnbinom (Rzmnbinomp0lt, size = 7, prob = 0.8, p0 = 0.01) Dzmnbinomp0gt <- dzmnbinom (Rzmnbinomp0gt, size = 7, prob = 0.8, p0 = 0.40) Dzmgeomp0lt <- dzmgeom (Rzmgeomp0lt, prob = pi/16, p0 = 0.01) Dzmgeomp0gt <- dzmgeom (Rzmgeomp0gt, prob = pi/16, p0 = 0.40) Dzmbinomp0lt <- dzmbinom (Rzmbinomp0lt, size = 12, prob = pi/16, p0 = 0.01) Dzmbinomp0gt <- dzmbinom (Rzmbinomp0gt, size = 12, prob = pi/16, p0 = 0.12) Dzmlogarithmicp0lt <- dzmlogarithmic(Rzmlogarithmicp0lt, prob = 0.99, p0 = 0.05) Dzmlogarithmicp0gt <- dzmlogarithmic(Rzmlogarithmicp0gt, prob = 0.99, p0 = 0.55) Dpoisinvgauss <- dpoisinvgauss(Rpoisinvgauss, mean = 12, dispersion = 0.1) DpoisinvgaussInf <- dpoisinvgauss(RpoisinvgaussInf, mean = Inf, dispersion = 1.1) ## Check q(p(.)) identity ep <- 1e-7 f1 <- 1 - ep # = 0.9999999 stopifnot(exprs = { Rztpois == qztpois (f1 * Pztpois, lambda = 12) Rztnbinom == qztnbinom (f1 * Pztnbinom, size = 7, prob = 0.01) Rztgeom == qztgeom (f1 * Pztgeom, prob = pi/16) Rztbinom == qztbinom (f1 * Pztbinom, size = 55, prob = pi/16) Rlogarithmic == qlogarithmic(f1 * Plogarithmic, prob = 0.99) Rzmpoisp0lt == qzmpois (Pzmpoisp0lt, lambda = 6, p0 = 0.001) Rzmpoisp0gt == qzmpois (Pzmpoisp0gt, lambda = 6, p0 = 0.010) Rzmnbinomp0lt == qzmnbinom 
(Pzmnbinomp0lt, size = 7, prob = 0.8, p0 = 0.01) Rzmnbinomp0gt == qzmnbinom (Pzmnbinomp0gt, size = 7, prob = 0.8, p0 = 0.40) Rzmgeomp0lt == qzmgeom (Pzmgeomp0lt, prob = pi/16, p0 = 0.01) Rzmgeomp0gt == qzmgeom (Pzmgeomp0gt, prob = pi/16, p0 = 0.40) Rzmbinomp0lt == qzmbinom (Pzmbinomp0lt, size = 12, prob = pi/16, p0 = 0.01) Rzmbinomp0gt == qzmbinom (Pzmbinomp0gt, size = 12, prob = pi/16, p0 = 0.12) Rzmlogarithmicp0lt == qzmlogarithmic(Pzmlogarithmicp0lt, prob = 0.99, p0 = 0.05) Rzmlogarithmicp0gt == qzmlogarithmic(Pzmlogarithmicp0gt, prob = 0.99, p0 = 0.55) Rpoisinvgauss == qpoisinvgauss(Ppoisinvgauss, mean = 12, dispersion = 0.1) RpoisinvgaussInf == qpoisinvgauss(PpoisinvgaussInf, mean = Inf, dispersion = 1.1) }) ## Check q(p(.)) identity with upper tail p1 <- 1 + ep # 1.0000001 stopifnot(exprs = { Rztpois == qztpois (p1 - Pztpois, lambda = 12, lower = FALSE) Rztnbinom == qztnbinom (p1 - Pztnbinom, size = 7, prob = 0.01, lower = FALSE) Rztgeom == qztgeom (p1 - Pztgeom, prob = pi/16, lower = FALSE) Rztbinom == qztbinom (p1 - Pztbinom, size = 55, prob = pi/16, lower = FALSE) Rlogarithmic == qlogarithmic(p1 - Plogarithmic, prob = 0.99, lower = FALSE) Rzmpoisp0lt == qzmpois (1 - Pzmpoisp0lt, lambda = 6, p0 = 0.001, lower = FALSE) Rzmpoisp0gt == qzmpois (1 - Pzmpoisp0gt, lambda = 6, p0 = 0.010, lower = FALSE) Rzmnbinomp0lt == qzmnbinom (1 - Pzmnbinomp0lt, size = 7, prob = 0.8, p0 = 0.01, lower = FALSE) Rzmnbinomp0gt == qzmnbinom (1 - Pzmnbinomp0gt, size = 7, prob = 0.8, p0 = 0.40, lower = FALSE) Rzmgeomp0lt == qzmgeom (1 - Pzmgeomp0lt, prob = pi/16, p0 = 0.01, lower = FALSE) Rzmgeomp0gt == qzmgeom (1 - Pzmgeomp0gt, prob = pi/16, p0 = 0.40, lower = FALSE) Rzmbinomp0lt == qzmbinom (1 - Pzmbinomp0lt, size = 12, prob = pi/16, p0 = 0.01, lower = FALSE) Rzmbinomp0gt == qzmbinom (1 - Pzmbinomp0gt, size = 12, prob = pi/16, p0 = 0.12, lower = FALSE) Rzmlogarithmicp0lt == qzmlogarithmic(1 - Pzmlogarithmicp0lt, prob = 0.99, p0 = 0.05, lower = FALSE) Rzmlogarithmicp0gt == 
qzmlogarithmic(1 - Pzmlogarithmicp0gt, prob = 0.99, p0 = 0.55, lower = FALSE) Rpoisinvgauss == qpoisinvgauss(1 - Ppoisinvgauss, mean = 12, dispersion = 0.1, lower = FALSE) RpoisinvgaussInf == qpoisinvgauss(1 - PpoisinvgaussInf, mean = Inf, dispersion = 1.1, lower = FALSE) }) ## Check q(p(., log), log) identity stopifnot(exprs = { Rztpois == qztpois (log(Pztpois) - ep, lambda = 12, log = TRUE) Rztnbinom == qztnbinom (log(Pztnbinom) - ep, size = 7, prob = 0.01, log = TRUE) Rztgeom == qztgeom (log(Pztgeom) - ep, prob = pi/16, log = TRUE) Rztbinom == qztbinom (log(Pztbinom) - ep, size = 55, prob = pi/16, log = TRUE) Rlogarithmic == qlogarithmic(log(Plogarithmic) - ep, prob = 0.99, log = TRUE) Rzmpoisp0lt == qzmpois (log(Pzmpoisp0lt), lambda = 6, p0 = 0.001, log = TRUE) Rzmpoisp0gt == qzmpois (log(Pzmpoisp0gt), lambda = 6, p0 = 0.010, log = TRUE) Rzmnbinomp0lt == qzmnbinom (log(Pzmnbinomp0lt), size = 7, prob = 0.8, p0 = 0.01, log = TRUE) Rzmnbinomp0gt == qzmnbinom (log(Pzmnbinomp0gt), size = 7, prob = 0.8, p0 = 0.40, log = TRUE) Rzmgeomp0lt == qzmgeom (log(Pzmgeomp0lt), prob = pi/16, p0 = 0.01, log = TRUE) Rzmgeomp0gt == qzmgeom (log(Pzmgeomp0gt), prob = pi/16, p0 = 0.40, log = TRUE) Rzmbinomp0lt == qzmbinom (log(Pzmbinomp0lt), size = 12, prob = pi/16, p0 = 0.01, log = TRUE) Rzmbinomp0gt == qzmbinom (log(Pzmbinomp0gt), size = 12, prob = pi/16, p0 = 0.12, log = TRUE) Rzmlogarithmicp0lt == qzmlogarithmic(log(Pzmlogarithmicp0lt), prob = 0.99, p0 = 0.05, log = TRUE) Rzmlogarithmicp0gt == qzmlogarithmic(log(Pzmlogarithmicp0gt), prob = 0.99, p0 = 0.55, log = TRUE) Rpoisinvgauss == qpoisinvgauss(log(Ppoisinvgauss), mean = 12, dispersion = 0.1, log = TRUE) RpoisinvgaussInf == qpoisinvgauss(log(PpoisinvgaussInf), mean = Inf, dispersion = 1.1, log = TRUE) }) ## Check q(p(., log), log) identity with upper tail stopifnot(exprs = { Rztpois == qztpois (ep + log1p(-Pztpois), lambda = 12, lower = FALSE, log = TRUE) Rztnbinom == qztnbinom (ep + log1p(-Pztnbinom), size = 7, prob = 0.01, 
lower = FALSE, log = TRUE) Rztgeom == qztgeom (ep + log1p(-Pztgeom), prob = pi/16, lower = FALSE, log = TRUE) Rztbinom == qztbinom (ep + log1p(-Pztbinom), size = 55, prob = pi/16, lower = FALSE, log = TRUE) Rlogarithmic == qlogarithmic(ep + log1p(-Plogarithmic), prob = 0.99, lower = FALSE, log = TRUE) Rzmpoisp0lt == qzmpois (log1p(-Pzmpoisp0lt), lambda = 6, p0 = 0.001, lower = FALSE, log = TRUE) Rzmpoisp0gt == qzmpois (log1p(-Pzmpoisp0gt), lambda = 6, p0 = 0.010, lower = FALSE, log = TRUE) Rzmnbinomp0lt == qzmnbinom (log1p(-Pzmnbinomp0lt), size = 7, prob = 0.8, p0 = 0.01, lower = FALSE, log = TRUE) Rzmnbinomp0gt == qzmnbinom (log1p(-Pzmnbinomp0gt), size = 7, prob = 0.8, p0 = 0.40, lower = FALSE, log = TRUE) Rzmgeomp0lt == qzmgeom (log1p(-Pzmgeomp0lt), prob = pi/16, p0 = 0.01, lower = FALSE, log = TRUE) Rzmgeomp0gt == qzmgeom (log1p(-Pzmgeomp0gt), prob = pi/16, p0 = 0.40, lower = FALSE, log = TRUE) Rzmbinomp0lt == qzmbinom (log1p(-Pzmbinomp0lt), size = 12, prob = pi/16, p0 = 0.01, lower = FALSE, log = TRUE) Rzmbinomp0gt == qzmbinom (log1p(-Pzmbinomp0gt), size = 12, prob = pi/16, p0 = 0.12, lower = FALSE, log = TRUE) Rzmlogarithmicp0lt == qzmlogarithmic(log1p(-Pzmlogarithmicp0lt), prob = 0.99, p0 = 0.05, lower = FALSE, log = TRUE) Rzmlogarithmicp0gt == qzmlogarithmic(log1p(-Pzmlogarithmicp0gt), prob = 0.99, p0 = 0.55, lower = FALSE, log = TRUE) Rpoisinvgauss == qpoisinvgauss(log1p(-Ppoisinvgauss), mean = 12, dispersion = 0.1, lower = FALSE, log = TRUE) RpoisinvgaussInf == qpoisinvgauss(log1p(-PpoisinvgaussInf), mean = Inf, dispersion = 1.1, lower = FALSE, log = TRUE) }) ## Check limiting cases stopifnot(exprs = { rztpois (1, lambda = 0) == 1 rztnbinom (1, size = 7, prob = 1) == 1 rztgeom (1, prob = 1) == 1 rztbinom (1, size = 1, prob = pi/16) == 1 rztbinom (1, size = 55, prob = 0) == 1 rlogarithmic(1, prob = 0) == 1 rzmpois (1, lambda = 0, 0.5) %in% c(0, 1) rzmnbinom (1, size = 7, prob = 1, 0.5) %in% c(0, 1) rzmgeom (1, prob = 1, 0.5) %in% c(0, 1) rzmbinom (1, size = 
1, prob = pi/16, 0.5) %in% c(0, 1) rzmbinom (1, size = 55, prob = 0, 0.5) %in% c(0, 1) rzmlogarithmic(1, prob = 0, 0.5) %in% c(0, 1) }) actuar/tests/var-methods-tests.R0000644000176200001440000000223415147745722016426 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Tests for the 'var' and 'sd' methods for individual and grouped ### data. ### ### AUTHOR: Vincent Goulet ## Load the package library(actuar) ### ### Individual data ### ## Check that results are identical to stats::var and stats::sd, as it ## should be. stopifnot(exprs = { identical(var(dental), stats::var(dental)) identical(sd(dental), stats::sd(dental)) }) ## Check correct handling of missing data (issue #5 fixed with ## 62b92eff). x <- c(dental, NA) stopifnot(exprs = { identical(var(x, na.rm = TRUE), stats::var(x, na.rm = TRUE)) identical(sd(x, na.rm = TRUE), stats::sd(x, na.rm = TRUE)) }) ### ### Grouped data ### ## Extract group boundaries and frequencies from a grouped data ## object. cj <- gdental[, 1] nj <- gdental[, 2] ## Compute variance and standard deviation by hand. 
midpoints <- cj[-length(cj)] + diff(cj)/2 means <- drop(crossprod(nj, midpoints)/sum(nj)) v <- drop(crossprod(nj, (midpoints - means)^2)/(sum(nj) - 1)) s <- sqrt(v) stopifnot(exprs = { all.equal(v, unname(var(gdental))) all.equal(s, unname(sd(gdental))) }) actuar/MD50000644000176200001440000003422715151421061012045 0ustar liggesusersd854fe598e406177949739b0c1878cdd *DESCRIPTION ac28959174a2eb45950484feb0792d8b *NAMESPACE 897ed422ca2c1beb724be294ffa9e9ac *R/BetaMoments.R 4eb3807e52e187a67327d326e5cec116 *R/Burr.R 3965f43c4eb19c7a7dd3f0b0d9ce82ed *R/CTE.R d71c766092688d8ace86e9390d9494fc *R/ChisqSupp.R e1449bca14a1d2067b5ca96d81465394 *R/ExponentialSupp.R b478732d335382c1fd29ae2ffcb594b8 *R/Extract.grouped.data.R ddfc518f5632c092117bd3683ab71b6a *R/FellerPareto.R 6536049b3a817c22298cfbc7d708b9de *R/GammaSupp.R 1d798ad9fb619c57b6234ce538c7843f *R/GeneralizedBeta.R b60c8cf61518d3fcb6c46e1cc8d0b4fe *R/GeneralizedPareto.R bc8505634d74b7bd8201400bfb0411cf *R/Gumbel.R 1f7797c8d96468caec51272ca6112b0e *R/InverseBurr.R b67bfde359869dca1d51d5a6270f5d40 *R/InverseExponential.R b559da828d4e3e283fb93b0691049d03 *R/InverseGamma.R c9152b7948c9154da4df196d7d0615c0 *R/InverseGaussian.R 0210a7e85bb7071ba9be657c71e7de40 *R/InverseParalogistic.R 9e70aca6c90b9233fab986fef20b5786 *R/InversePareto.R 49bab767297187138d857c408e633f46 *R/InverseTransformedGamma.R 535173040b228938cee28e2bc565d66c *R/InverseWeibull.R d53ffa132009c951d906001622f90168 *R/Logarithmic.R 779255efbe27b5127d5a4c018de58eec *R/Loggamma.R 7b310e8e8c1c2f21ca4d5a6c2d45687d *R/Loglogistic.R 51fa5c8ec5ac728cc9a206c9b3a685df *R/LognormalMoments.R ba49f5d675f03254c59efaf17eb09ab4 *R/NormalSupp.R 7e118f3973ead42d8c27175834d14b3e *R/Paralogistic.R a6639ef45829476468f3569f6a72e0db *R/Pareto.R 366fa3267df223b0138ad727d7017cfc *R/Pareto2.R ea1af518ab2a801760f9096e25a7fcf9 *R/Pareto3.R 6b60deaa3fc14350121d6e0eb1053a86 *R/Pareto4.R 54701777966346cc4113acf73cc54d0f *R/PhaseType.R 4d5ae06ab8158d0460b1141454cfe292 
*R/PoissonInverseGaussian.R 15162616a66854c75a163b54c55bf1a7 *R/SingleParameterPareto.R 5d890d751ef3f233e565bf337934928b *R/TransformedBeta.R 97f04c8a4d99357e1dc7dc157e99777c *R/TransformedGamma.R 42309c4a3f95e3c0f2a3d85de52c8ae8 *R/UniformSupp.R f7825e63fc68307bc8d8fb7e3e4dc993 *R/VaR.R 3a6a050a94d357f2722c5bd679188c41 *R/WeibullMoments.R 28cff186b532b3f2f11de659ec8117f0 *R/ZeroModifiedBinomial.R 23ddfd9674a66c845f109175b200273b *R/ZeroModifiedGeometric.R 30c2ee36ccd94a418a7fe8d831053e81 *R/ZeroModifiedLogarithmic.R 7ef383933d9f3e57ca467eb2f48126bb *R/ZeroModifiedNegativeBinomial.R 86951dbd48d9072ae2c39587b6ce479a *R/ZeroModifiedPoisson.R a7404543a59fdf1098ff194b4a5b07d7 *R/ZeroTruncatedBinomial.R cdef9997fe9a5aad4fd59a783b5b5641 *R/ZeroTruncatedGeometric.R 8a15f7c97fbc80d384a7b1ab65e58222 *R/ZeroTruncatedNegativeBinomial.R 9a8b00de6d10f69f370de9b34b70638c *R/ZeroTruncatedPoisson.R 46bcac0d79b89d79300dffd00679d84d *R/adjCoef.R 082d3a685d776e423bf227e86a53b255 *R/aggregateDist.R 9386b587af91970a246cffe22125c99c *R/bayes.R cad82d69924eea24d9331043aa8d932f *R/betaint.R deefe58e5493ecbe1b1a66349af978d4 *R/bstraub.R 07ecc120ec9709b3a3169464f9157fa0 *R/cm.R a0785a0d55c903985ce5b8e2caed3a70 *R/coverage.R 97e6d19c54e56f2bbabc18421e9750c1 *R/discretize.R ebb17d2210a64597ec60bf7a1afb81d9 *R/elev.R 1f16152b56d6b585cb3b34019c9f4c9a *R/emm.R 6c108ca22200bb02cffe0c0a615f818f *R/exact.R 514f21d7247da2c1a2dc1e5d7b671c8c *R/grouped.data.R cee57eb2dbbbce79ba41398e6ed4ee99 *R/hache.R 4ee527efa52117e2afadb4e3f9dcbc54 *R/hache.barycenter.R dfbc96214f68bd8ba5753cad4a214fbe *R/hache.origin.R 08e0a5941190b2ac7c6760a1814cf3fc *R/hierarc.R 9dd9dde7e2fa6f0cb97d283a6d153a67 *R/hist.grouped.data.R abd5e8b6d8c097f12d49fae70402ab8a *R/mde.R b5ba4fcb93636b9109a612b59ead39a1 *R/mean.grouped.data.R 51ae31191cbe874bb0776ad9ac1f4329 *R/normal.R ab0cfeac34a96a29763454cae34e9d96 *R/ogive.R 1ff77003e9dc0520ab130042b488c469 *R/panjer.R 25e2fd00a7ec8be892f3046f9dbc2d45 *R/quantile.aggregateDist.R 
189c7d99b3ac103a4bc89a054cce6b07 *R/quantile.grouped.data.R 2e057c192eec665f9f80aa94337aa0c6 *R/rcomphierarc.R 604be8757d8ed0337443af0f990906c7 *R/rcomphierarc.summaries.R 8c1266b5a2074e6c76ca7a18bbdc1942 *R/rcompound.R ba40593cd7cc3bf25c223697acc512c6 *R/rmixture.R d4ac27948086827d0a87e740af701abf *R/ruin.R d8496850dc700280270491250a63ff56 *R/severity.R 8100464532661fe82f6fb460bc9713a2 *R/simS.R a9c79777e1bbc8f7aa5e38e99132fca8 *R/unroll.R cccf737b2a274d7071aa7b0c9e47bd4a *R/var-methods.R 62fc4f745eb5441a27cfd3e11c41364e *build/partial.rdb cafe8a9095653f7b017c449a0b6307a1 *build/vignette.rds da6fbdff847094e9912fa30c9face3a3 *data/dental.rda e886a1fe6ab8ec18a541f6b8ea5c6683 *data/gdental.rda 6380487493d31ef00477ec198b042f95 *data/hachemeister.rda 206fe0a149ac938f90adeec103672e39 *demo/00Index 70cdea149ce67c7ea8dfdb6d3d2d0204 *demo/credibility.R b255336d6294311b8e51c4e63c385b3b *demo/lossdist.R 3a93e5a0fe9eefddb6f977e9f7d02147 *demo/risk.R 4c5d46011a661ed3fc81c6ed29a3f556 *demo/simulation.R 265512e43f452a2c49a40dcdb3816a8d *inst/CITATION 2a7a3a4bfc457254fb8d8fd273674465 *inst/NEWS.0.Rd 93822768a8a7bc79173a88839d75679d *inst/NEWS.1.Rd 46233fd433b7a57d4ac93562a459f61c *inst/NEWS.2.Rd 2c42a4b8361f96b3cc7aaf2b725a90ae *inst/NEWS.Rd 54c6102cdf7b26efb50d96f537b9f77a *inst/doc/actuar.R 25eda75d3a9dafb321cf02767f2e0699 *inst/doc/actuar.Rnw b634d8e17c5d0e809957d7a20db5618d *inst/doc/actuar.pdf 2d03a5e39d6ff94432112d0936a0e1cc *inst/doc/coverage.R 5f5a49b3fb7528a82c4aae99eccd4d01 *inst/doc/coverage.Rnw 5ab3f24d3f1115c61fd7b3ac5f7b6cd1 *inst/doc/coverage.pdf c34e24f1569ec7886f780e963ab0f5c7 *inst/doc/credibility.R 61afc0a453a8b5657ae62f6858d2647b *inst/doc/credibility.Rnw 37c9ce3fff1f553b2999f2abc565e9eb *inst/doc/credibility.pdf fe30d0dda904a51ed55c4d2be5c39a3b *inst/doc/distributions.Rnw e73b661960770f5d4c58f4d9798e757d *inst/doc/distributions.pdf f19c665d32a053449a72780d03eb8752 *inst/doc/modeling.R 41ae79ebc61185ddef5cf6d37b4c2240 *inst/doc/modeling.Rnw 
226bff67126311e0e4a0b0f87de8c9e5 *inst/doc/modeling.pdf 413292084d97e9652ac52adfc6212907 *inst/doc/risk.R 1a59e2be1fa4daa72d09192e1031878d *inst/doc/risk.Rnw 0bada60bb0234291a5eda2e9a4d4bb74 *inst/doc/risk.pdf 36a7d3c8b03e0ef6a54486dcb4764526 *inst/doc/simulation.R f46dc29345c3f3b43d11f606e7b5df8a *inst/doc/simulation.Rnw 5d8932da984f159576f416071bf14319 *inst/doc/simulation.pdf bebc84cdf387870641034e7de14570b2 *inst/include/actuarAPI.h fe020c2287ee8dc8c57a9920ed8efc2e *inst/po/en@quot/LC_MESSAGES/R-actuar.mo 357c632c0ac3304401e6fc77ac6fb5f4 *inst/po/en@quot/LC_MESSAGES/actuar.mo 308b748c6f30cd5690aba506cefdbb90 *inst/po/fr/LC_MESSAGES/R-actuar.mo f8cd3795d32963c6fc32b064f8ba9bfc *inst/po/fr/LC_MESSAGES/actuar.mo d63628bf63171d06a2d4caac032ca298 *inst/po/it/LC_MESSAGES/R-actuar.mo f0c66c364544558c97bc0be2db8632ef *inst/po/it/LC_MESSAGES/actuar.mo a755dd2fbf5c32b50ebc32b75b905812 *man/BetaMoments.Rd a5b36249c0e0e080efe0d433085e291a *man/Burr.Rd d879ce2614ecb9cada7c2299aa8ef717 *man/CTE.Rd f90e3267c218e2bbec36827befbd5681 *man/ChisqSupp.Rd a36da40ba94b948d645c0c7291884f6a *man/ExponentialSupp.Rd e64e51f65b83ef97abfebd0f0f0c38da *man/Extract.grouped.data.Rd 5b0c96bcc91218a4ad8d144b5416c392 *man/FellerPareto.Rd f99fdaf70013ee8d9b93862edaa1ce4d *man/GammaSupp.Rd c07b818c28d902ba89d2e4bd6f91996c *man/GeneralizedBeta.Rd 3dd31f6e5df7ab4a85b3d0e4c9cf017d *man/GeneralizedPareto.Rd 8fbf644555828cdb77b5ca846961e322 *man/Gumbel.Rd 8dcb5ce64fc21451b6b5bdb8d8120e83 *man/InverseBurr.Rd 8cbeea0bec948b962a48920ff3a1aeb7 *man/InverseExponential.Rd 1a388b76d074be8c7327ed4e70612236 *man/InverseGamma.Rd 9ea8090f2ccea030c4bb0c4a2754419c *man/InverseGaussian.Rd 4828f4076903a9b35ad0b7c37734e393 *man/InverseParalogistic.Rd bbf0d27f10ffb11afcd9f7369f1b3dbd *man/InversePareto.Rd 5f7b9c1c63fe38a8eca486eeb2ff199f *man/InverseTransformedGamma.Rd 23ffafbba3401b859954b533247a90b8 *man/InverseWeibull.Rd e6df2d08359ce298e71fa7a68125e1f3 *man/Logarithmic.Rd cccb4d2ddcd349c5a207f33ab3077dd1 
*man/Loggamma.Rd 0dbde815295726f32cf5f563ec102225 *man/Loglogistic.Rd d6c1fe8d8b341ded225bc193d3fa4588 *man/LognormalMoments.Rd ab9f79c93ad324f4799a1fbbed2141d1 *man/NormalSupp.Rd fc8d0fdedfef3ee1d53a79a1b702cf1b *man/Paralogistic.Rd ad5cfa789efa6493aefe1acb91a5a6bc *man/Pareto.Rd 8e4341f214ca4c0f37479f6a315f0d6a *man/Pareto2.Rd bc68a38c54a1da299fa938a8862cbe3d *man/Pareto3.Rd c6c2e08dd5218b3f0bf984b9c635b301 *man/Pareto4.Rd ca22543943f8235456aa588c1aebdb30 *man/PhaseType.Rd f66617fa4003eec4a7e466f5e15f9c46 *man/PoissonInverseGaussian.Rd de268358595d4c258b70eafe1d3ccc20 *man/SingleParameterPareto.Rd 879a15c9e184fd89663a3b2722fdb9d6 *man/TransformedBeta.Rd ae97789b1f01240d83101889836cdac4 *man/TransformedGamma.Rd ed2257d1b871c738b3568946e9060854 *man/UniformSupp.Rd 17e4ad925959dd184c1e311661937a4d *man/VaR.Rd 30ccb130026f702bd6880cdebd5e0cdc *man/WeibullMoments.Rd 84a695bd7218b7d86adf69558261bad8 *man/ZeroModifiedBinomial.Rd a32fd5e382630aeb2cea6a364b5a8480 *man/ZeroModifiedGeometric.Rd cef1debcf683144a1391aa04bcfb27cc *man/ZeroModifiedLogarithmic.Rd 4f715f784f55d6f1c35c5b0e76f3148c *man/ZeroModifiedNegativeBinomial.Rd fe31b64dd377996a320d7f0ec536b39a *man/ZeroModifiedPoisson.Rd f4667c1a70dfb2c9ceb8f6d1f3476324 *man/ZeroTruncatedBinomial.Rd 6b3e1b6c4afd2bdcb3edf322a4f72dfc *man/ZeroTruncatedGeometric.Rd 9c13ff5ed2532cce33a4b33ac53ceaa0 *man/ZeroTruncatedNegativeBinomial.Rd a3b68a6537cd0a46011529c66b738696 *man/ZeroTruncatedPoisson.Rd d4abca98c7c87e4ac564a044fcf31679 *man/actuar-package.Rd 536b3b28791e306bcaf157ec480aa6a3 *man/adjCoef.Rd 2e390d1f0143bed5093fd67edd421621 *man/aggregateDist.Rd d6d01d1130a6c3c7c1b9bbd7ddb26cae *man/betaint.Rd c518a79ea38d749f528650b5970ba85b *man/cm.Rd 6e32ade77f871805b5650f784f806abe *man/coverage.Rd 6044e3f1b95f253a80c0d578edbbd7a6 *man/dental.Rd d7e21ad16f1eb6c4fbd8122c13d16c30 *man/discretize.Rd db5fd4f06810d8020f1cd70d4d2e8453 *man/elev.Rd c15d6cd098d1bf0e524c205ef85e1a59 *man/emm.Rd 39643c4bc3d0cb51764f6e64aa2dd354 *man/gdental.Rd 
35b3f616d3637b5b32966681f3cdcea6 *man/grouped.data.Rd daaaa29d395f33c225e96f69fa9d74a6 *man/hachemeister.Rd 2c4852e710c1ffcc693ae6be10213465 *man/hist.grouped.data.Rd 2090db683b954338399a99539b34e7ad *man/mde.Rd f48a9ca0f857ada94234a4b37f41e8c2 *man/mean.grouped.data.Rd 45e87a1fca0cc9f9d9aabeb1f8deb79d *man/ogive.Rd baa2b018d4db06e55bf4cb9e7c118a4b *man/quantile.aggregateDist.Rd c1d373c5dab78d5ba1e8ce5288ac4259 *man/quantile.grouped.data.Rd 407b19473359f63c87c3c2f3808c529d *man/rcomphierarc.Rd bb2b512a9587144411acab2fe2edf8ac *man/rcomphierarc.summaries.Rd 1b64c461cacca100fd654d89525482a2 *man/rcompound.Rd 04a50062890975798781d7f5d9c6982f *man/rmixture.Rd 771638ff079e481475b4fb08e5f24059 *man/ruin.Rd 62fe79bec4453860df38c4add86c7091 *man/severity.Rd 52c4034e29078164436cbce1e4444952 *man/unroll.Rd 2f799ad801d4c4172aaf761e6b0937ef *man/var-methods.Rd 8a07fbcaf126feac5a4918f8ff47ae1a *po/R-actuar.pot 5811abc029854005f3fa88532484eaa2 *po/R-fr.po 498af72bedff605815abc310a87e1083 *po/R-it.po a2884ce3ae9ad6cb5b7d9dca665c5847 *po/actuar.pot b3a2b0896fcecbff1a21a09b1585c8a1 *po/fr.po e39b16aaa4db083d6c065e4022dacee6 *po/it.po 7fbcbeb771d645b3019868c753dc7dac *src/Makevars bece94f05eade2c2b72b692454215695 *src/actuar-win.def d866223f447064c1cc4e0b2703a5ef79 *src/actuar.h c8b2ead45feff5557b4426059f358fc8 *src/beta.c 167d153a6a6d466ab507db0780474c2b *src/betaint.c 9c30d6493c7c88062061bc212a2f94ac *src/burr.c 48c24c75e7e5e6985c079645e33fd154 *src/chisq.c 242e70d39cd429a8546e2e692ea78267 *src/dpq.c 8082124c29513b098f5a72bc395e367b *src/dpq.h 8f41957f2fe0fd5813604e9231835703 *src/dpqphtype.c 20fde04e013227cc195a5d7f4c6dd76d *src/exp.c 8523c8415c74846418f7668e804962e2 *src/fpareto.c 312fe85f8307c7e45d53f118789b92e9 *src/gamma.c b30606d5ef612b7e36ad521a6fa4f15c *src/genbeta.c 07617c7f9bd3850dbf5c125205ee4a50 *src/genpareto.c 5881f88789e12197629dabb82f37bfe2 *src/gumbel.c 66e3624e2411e885fa5de545c2588926 *src/hierarc.c 09d1cf8e8af5b90664cc144bbcf0c7f9 *src/init.c 
add45c5792404f0e50488101561cda98 *src/invburr.c d24f3ec76f1a390865236e761507eca9 *src/invexp.c 773bf6dfd624e013153dc6fad0b41ef0 *src/invgamma.c 9b30a69b2505745fa743b121d3f2b202 *src/invgauss.c bf661856dd4cd75b261ed8da58075631 *src/invparalogis.c f341f9a518224a9666a1f3503d136ffe *src/invpareto.c d468b0b2f832b71e1f2d718a3ce05d07 *src/invtrgamma.c 94698806446eb8b6a9399020fde2117a *src/invweibull.c 1dfe640aeab42b10962917c55488789e *src/lgamma.c 3c1665ee7820b06bf9b4cd19f0b5bab8 *src/llogis.c f75ec671f0aae41a9096ccfa759ebdba *src/lnorm.c bbb5f2e900fe0cc5bc568d702d3ec3ab *src/locale.h 2ab61cbb5419ed0ad44acb3be2a24ac3 *src/logarithmic.c 9c713a0375e6f60f9da951502bea6317 *src/names.c 9093d8b368d18412c108c79c0f06ae8f *src/norm.c faee18c808ccb6d6bc99e5967d430831 *src/panjer.c 8d8c6e88112590c5ba1b37a45e32fe36 *src/paralogis.c 082700f0e53fa1245858c85fc3db73bc *src/pareto.c a46729c17991f37c65ae2228275fbf2a *src/pareto1.c 2c63b5b1f9e4ac3b92db32142146e1d9 *src/pareto2.c eed5c0fcf81b02a01d4f650fffe09e61 *src/pareto3.c 8b5ee13ac8d7ebcfe884bcb9ed43f409 *src/pareto4.c 08c56928909e1a32dafd3464db6517b5 *src/phtype.c 4ab64f4ab0bf4914536062481a784d4c *src/poisinvgauss.c 51f32a1046d9b124b9eeb05d17bd5218 *src/qDiscrete_search.h b21f46cf8a20a95a1cded0a65b11c766 *src/random.c 8a34f804eead95891e41c14cefc60b68 *src/randomphtype.c 61079014ae71296852fcbf372276be60 *src/trbeta.c 57d10d46317c060649b4389db9074797 *src/trgamma.c 7a82ca42fbdae512854bdf4794c0a558 *src/unif.c 1ac736574129de7ec9da305997d4bc9c *src/util.c 42214b9442bcede3be64125ec8e46e6d *src/weibull.c ebd67765c1deb99d57ff18604260e863 *src/zmbinom.c 762784c9ac075d8702fb52731e8368a7 *src/zmgeom.c 3df633fbd8f3fa2146b0abcd6d4ff060 *src/zmlogarithmic.c 20205aa2e0d66e606a1003f1de6e2413 *src/zmnbinom.c 2febbb25bd59d401e111b6128a57f5c4 *src/zmpois.c c97bd0affaa229f26a8b97ea5ff8df2b *src/ztbinom.c e29da913fdfa9bd17b55d4b774f9aa78 *src/ztgeom.c f9abc3929cfe4a449286e0f7f97860c3 *src/ztnbinom.c 84705e95f75f20d3c04559d7bc07e4fd *src/ztpois.c 
b2dc8c5900c8e9933c38165a381d8176 *tests/betaint-tests.R 79a0b2697e6f62bf3af24bc70b1d8d90 *tests/dpqr-tests.R 19128e4a29a71c7db9e2301ee0debac5 *tests/rcompound-tests.R ab41d2f76222ec733cb701a556adc346 *tests/rmixture-tests.R e0f352d5b015a0d55dfb60f8e965b657 *tests/var-methods-tests.R 686901b4349abc35727753978c071833 *vignettes/Makefile 25eda75d3a9dafb321cf02767f2e0699 *vignettes/actuar.Rnw ddeae7214a90ee41b648f2e58c6660fb *vignettes/actuar.bib 5f5a49b3fb7528a82c4aae99eccd4d01 *vignettes/coverage.Rnw 61afc0a453a8b5657ae62f6858d2647b *vignettes/credibility.Rnw fe30d0dda904a51ed55c4d2be5c39a3b *vignettes/distributions.Rnw 7ec15c16d0d66790f28e90343c5434a3 *vignettes/framed.sty 41ae79ebc61185ddef5cf6d37b4c2240 *vignettes/modeling.Rnw 1a59e2be1fa4daa72d09192e1031878d *vignettes/risk.Rnw d56dff619edf642e5023f91464a309fc *vignettes/share/preamble.tex f46dc29345c3f3b43d11f606e7b5df8a *vignettes/simulation.Rnw actuar/po/0000755000176200001440000000000015151412457012154 5ustar liggesusersactuar/po/R-actuar.pot0000644000176200001440000001341615147745722014374 0ustar liggesusersmsgid "" msgstr "" "Project-Id-Version: actuar 3.3-4\n" "POT-Creation-Date: 2023-11-07 14:41\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "rows extracted in increasing order" msgstr "" msgid "impossible to replace boundaries and frequencies simultaneously" msgstr "" msgid "only logical matrix subscripts are allowed in replacement" msgstr "" msgid "need 0, 1, or 2 subscripts" msgstr "" msgid "one of %s or %s is needed" msgstr "" msgid "mgf.claim" msgstr "" msgid "h" msgstr "" msgid "%s must be a function or an expression containing %s" msgstr "" msgid "x" msgstr "" msgid "mgf.wait" msgstr "" msgid "%s must be a function or an expression containing %s and %s" msgstr "" msgid "y" msgstr "" msgid "%s must be a function when 
using reinsurance" msgstr "" msgid "premium.rate" msgstr "" msgid "%s must supply the mean and variance of the distribution" msgstr "" msgid "moments" msgstr "" msgid "%s must supply the mean, variance and skewness of the distribution" msgstr "" msgid "%s must supply the number of simulations" msgstr "" msgid "nb.simul" msgstr "" msgid "expressions in %s and %s must be named" msgstr "" msgid "model.freq" msgstr "" msgid "model.sev" msgstr "" msgid "%s must be a vector of probabilities" msgstr "" msgid "frequency distribution must be supplied as a character string" msgstr "" msgid "internal error" msgstr "" msgid "function not defined for approximating distributions" msgstr "" msgid "lower bound of the likelihood missing" msgstr "" msgid "one of the Gamma prior parameter %s, %s or %s missing" msgstr "" msgid "shape" msgstr "" msgid "rate" msgstr "" msgid "scale" msgstr "" msgid "one of the Beta prior parameter %s or %s missing" msgstr "" msgid "shape1" msgstr "" msgid "shape2" msgstr "" msgid "parameter %s of the likelihood missing" msgstr "" msgid "size" msgstr "" msgid "shape.lik" msgstr "" msgid "sd.lik" msgstr "" msgid "unsupported likelihood" msgstr "" msgid "missing ratios not allowed when weights are not supplied" msgstr "" msgid "there must be at least one node with more than one period of experience" msgstr "" msgid "there must be more than one node" msgstr "" msgid "missing values are not in the same positions in %s and in %s" msgstr "" msgid "weights" msgstr "" msgid "ratios" msgstr "" msgid "no available data to fit model" msgstr "" msgid "maximum number of iterations reached before obtaining convergence" msgstr "" msgid "unsupported interactions in %s" msgstr "" msgid "formula" msgstr "" msgid "hierarchical regression models not supported" msgstr "" msgid "ratios have to be supplied if weights are" msgstr "" msgid "empty regression model; fitting with Buhlmann-Straub's model" msgstr "" msgid "invalid level name" msgstr "" msgid "coverage modifications 
must be positive" msgstr "" msgid "deductible must be smaller than the limit" msgstr "" msgid "coinsurance must be between 0 and 1" msgstr "" msgid "%s must be supplied" msgstr "" msgid "cdf" msgstr "" msgid "%s required with method %s" msgstr "" msgid "lev" msgstr "" msgid "unbiased" msgstr "" msgid "%s must be positive" msgstr "" msgid "order" msgstr "" msgid "%s not used when %s is specified" msgstr "" msgid "nclass" msgstr "" msgid "breaks" msgstr "" msgid "%s ignored when %s is specified" msgstr "" msgid "group" msgstr "" msgid "invalid number of group boundaries and frequencies" msgstr "" msgid "missing frequencies replaced by zeros" msgstr "" msgid "missing values are not in the same positions in 'weights' and in 'ratios'" msgstr "" msgid "there must be at least two nodes at every level" msgstr "" msgid "invalid level number" msgstr "" msgid "infinite group boundaries" msgstr "" msgid "%s is an alias for %s, however they differ." msgstr "" msgid "probability" msgstr "" msgid "!freq" msgstr "" msgid "%s must be a named list" msgstr "" msgid "start" msgstr "" msgid "%s must be supplied as a function" msgstr "" msgid "fun" msgstr "" msgid "%s must be a numeric vector or an object of class %s" msgstr "" msgid "grouped.data" msgstr "" msgid "%s specifies names which are not arguments to %s" msgstr "" msgid "%s measure requires an object of class %s" msgstr "" msgid "chi-square" msgstr "" msgid "frequency must be larger than 0 in all groups" msgstr "" msgid "LAS" msgstr "" msgid "optimization failed" msgstr "" msgid "%s has many elements: only the first used" msgstr "" msgid "p0" msgstr "" msgid "%s must be a valid probability (between 0 and 1)" msgstr "" msgid "value of %s ignored with a zero-truncated distribution" msgstr "" msgid "value of %s missing" msgstr "" msgid "lambda" msgstr "" msgid "value of %s or %s missing" msgstr "" msgid "prob" msgstr "" msgid "frequency distribution not in the (a, b, 0) or (a, b, 1) families" msgstr "" msgid "Pr[S = 0] is 
numerically equal to 0; impossible to start the recursion" msgstr "" msgid "nodes" msgstr "" msgid "level names different in %s, %s and %s" msgstr "" msgid "one of %s or %s must be non-NULL" msgstr "" msgid "nothing to do" msgstr "" msgid "invalid %s specification" msgstr "" msgid "by" msgstr "" msgid "invalid first argument %s" msgstr "" msgid "n" msgstr "" msgid "invalid values in %s" msgstr "" msgid "no positive probabilities" msgstr "" msgid "invalid third argument %s" msgstr "" msgid "models" msgstr "" msgid "par.claims" msgstr "" msgid "par.wait" msgstr "" msgid "parameters %s missing in %s" msgstr "" msgid "," msgstr "" msgid "parameter %s missing in %s" msgstr "" msgid "parameter %s or %s missing in %s" msgstr "" msgid "rates" msgstr "" msgid "invalid parameters in %s" msgstr "" msgid "%s must be a vector or a matrix" msgstr "" actuar/po/actuar.pot0000644000176200001440000000650315147745722014174 0ustar liggesusers# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the actuar package. # FIRST AUTHOR , YEAR. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: actuar 3.3-4\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-11-07 14:41-0500\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" #: betaint.c:143 dpq.c:105 dpq.c:241 dpq.c:497 dpq.c:697 dpq.c:875 dpq.c:1055 #: dpqphtype.c:57 random.c:134 random.c:141 random.c:235 random.c:242 #: random.c:353 random.c:360 random.c:467 random.c:474 random.c:579 #: random.c:586 randomphtype.c:74 randomphtype.c:81 msgid "invalid arguments" msgstr "" #: dpq.c:208 msgid "internal error in actuar_do_dpq1" msgstr "" #: dpq.c:463 msgid "internal error in actuar_do_dpq2" msgstr "" #: dpq.c:659 msgid "internal error in actuar_do_dpq3" msgstr "" #: dpq.c:838 msgid "internal error in actuar_do_dpq4" msgstr "" #: dpq.c:1014 msgid "internal error in actuar_do_dpq5" msgstr "" #: dpq.c:1160 msgid "internal error in actuar_do_dpq6" msgstr "" #: dpqphtype.c:177 msgid "internal error in actuar_do_dpqphtype2" msgstr "" #: fpareto.c:186 fpareto.c:246 pareto2.c:139 pareto2.c:193 pareto3.c:144 #: pareto3.c:199 pareto4.c:160 pareto4.c:219 #, c-format msgid "'order' (%.2f) must be integer, rounded to %.0f" msgstr "" #: hierarc.c:100 invgauss.c:209 msgid "maximum number of iterations reached before obtaining convergence" msgstr "" #: invgauss.c:150 msgid "maximum number of iterations must be at least 1" msgstr "" #: invpareto.c:185 msgid "integration failed" msgstr "" #: panjer.c:71 panjer.c:114 msgid "" "maximum number of recursions reached before the probability distribution was " "complete" msgstr "" #: random.c:81 msgid "NAs produced" msgstr "" #: random.c:171 msgid "internal error in actuar_do_random1" msgstr "" #: random.c:287 msgid "internal error in actuar_do_random2" msgstr "" #: random.c:399 msgid "internal error in actuar_do_random3" msgstr "" #: random.c:509 msgid 
"internal error in actuar_do_random4" msgstr "" #: random.c:621 msgid "internal error in actuar_do_random5" msgstr "" #: random.c:651 msgid "internal error in actuar_do_random" msgstr "" #: randomphtype.c:101 msgid "non-square sub-intensity matrix" msgstr "" #: randomphtype.c:104 msgid "non-conformable arguments" msgstr "" #: randomphtype.c:123 msgid "internal error in actuar_do_randomphtype2" msgstr "" #: randomphtype.c:150 msgid "internal error in actuar_do_randomphtype" msgstr "" #: util.c:106 #, c-format msgid "LAPACK routine dgebal returned info code %d when permuting" msgstr "" #: util.c:110 #, c-format msgid "LAPACK routine dgebal returned info code %d when scaling" msgstr "" #: util.c:157 #, c-format msgid "LAPACK routine dgetrf returned info code %d" msgstr "" #: util.c:160 #, c-format msgid "LAPACK routine dgetrs returned info code %d" msgstr "" #: util.c:266 msgid "'A' is 0-diml" msgstr "" #: util.c:268 msgid "no right-hand side in 'B'" msgstr "" #: util.c:279 #, c-format msgid "argument %d of Lapack routine dgesv had invalid value" msgstr "" #: util.c:282 msgid "Lapack routine dgesv: system is exactly singular" msgstr "" actuar/po/R-fr.po0000644000176200001440000002325715147745722013344 0ustar liggesusers# French translations for actuar package # Traduction française du package actuar. # Copyright (C) 2016 Vincent Goulet # This file is distributed under the same license as the actuar package. # Vincent Goulet , 2010. 
# msgid "" msgstr "" "Project-Id-Version: actuar 2.0-0\n" "Report-Msgid-Bugs-To: bugs@r-project.org\n" "POT-Creation-Date: 2023-11-07 14:41\n" "PO-Revision-Date: 2023-11-07 14:45-0500\n" "Last-Translator: Vincent Goulet \n" "Language-Team: Vincent Goulet \n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" msgid "rows extracted in increasing order" msgstr "lignes extraites en ordre croissant" msgid "impossible to replace boundaries and frequencies simultaneously" msgstr "impossible de remplacer simultanément les bornes et les fréquences" msgid "only logical matrix subscripts are allowed in replacement" msgstr "seuls les indices logiques sont permis pour le remplacement" msgid "need 0, 1, or 2 subscripts" msgstr "il faut 0, 1 ou 2 indices" msgid "one of %s or %s is needed" msgstr "l'une ou l'autre de %s ou %s est requise" msgid "mgf.claim" msgstr "mgf.claim" msgid "h" msgstr "h" msgid "%s must be a function or an expression containing %s" msgstr "%s doit être une fonction ou une expression contenant %s" msgid "x" msgstr "x" msgid "mgf.wait" msgstr "mgf.wait" msgid "%s must be a function or an expression containing %s and %s" msgstr "%s doit être une fonction ou une expression contenant %s et %s" msgid "y" msgstr "y" msgid "%s must be a function when using reinsurance" msgstr "%s doit être une fonction en présence de réassurance" msgid "premium.rate" msgstr "premium.rate" msgid "%s must supply the mean and variance of the distribution" msgstr "%s doit contenir la moyenne et la variance de la distribution" msgid "moments" msgstr "moments" msgid "%s must supply the mean, variance and skewness of the distribution" msgstr "" "%s doit contenir la moyenne, la variance et l'asymétrie de la distribution" msgid "%s must supply the number of simulations" msgstr "%s doit spécifier le nombre de simulations" msgid "nb.simul" msgstr "nb.simul" msgid "expressions in %s and %s must be named" msgstr "les 
expressions dans %s et %s doivent être nommées" msgid "model.freq" msgstr "model.freq" msgid "model.sev" msgstr "model.sev" msgid "%s must be a vector of probabilities" msgstr "%s doit être un vecteur de probabilités" msgid "frequency distribution must be supplied as a character string" msgstr "" "la distribution de fréquence doit être spécifiée sous forme de chaîne de " "caractères" msgid "internal error" msgstr "erreur interne" msgid "function not defined for approximating distributions" msgstr "fonction non définie pour les méthodes d'approximation" msgid "lower bound of the likelihood missing" msgstr "seuil de la vraisemblance manquant" msgid "one of the Gamma prior parameter %s, %s or %s missing" msgstr "un des paramètres %s, %s ou %s de la loi Gamma manquant" msgid "shape" msgstr "shape" msgid "rate" msgstr "rate" msgid "scale" msgstr "scale" msgid "one of the Beta prior parameter %s or %s missing" msgstr "un des paramètres %s ou %s de la loi Bêta manquant" msgid "shape1" msgstr "shape1" msgid "shape2" msgstr "shape2" msgid "parameter %s of the likelihood missing" msgstr "paramètre %s de la vraisemblance manquant" msgid "size" msgstr "size" msgid "shape.lik" msgstr "shape.lik" msgid "sd.lik" msgstr "sd.lik" msgid "unsupported likelihood" msgstr "vraisemblance non valide" msgid "missing ratios not allowed when weights are not supplied" msgstr "ratios manquants non permis lorsque les poids ne sont pas fournis" msgid "there must be at least one node with more than one period of experience" msgstr "" "il y doit y avoir au moins un noeud avec plus d'une période d'expérience" msgid "there must be more than one node" msgstr "il doit y avoir plus d'un noeud" msgid "missing values are not in the same positions in %s and in %s" msgstr "" "les données manquantes ne sont pas aux mêmes positions dans %s et dans %s" msgid "weights" msgstr "weights" msgid "ratios" msgstr "ratios" msgid "no available data to fit model" msgstr "aucune donnée disponible pour la modélisation" 
msgid "maximum number of iterations reached before obtaining convergence" msgstr "nombre d'itérations maximal atteint avant obtention de la convergence" msgid "unsupported interactions in %s" msgstr "interactions non supportées dans %s" msgid "formula" msgstr "formula" msgid "hierarchical regression models not supported" msgstr "modèles de régression hiérarchiques non supportés" msgid "ratios have to be supplied if weights are" msgstr "ratios requis s'il y a des poids" msgid "empty regression model; fitting with Buhlmann-Straub's model" msgstr "modèle de régression vide; utilisation du modèle de Bühlmann-Straub" msgid "invalid level name" msgstr "nom de niveau incorrect" msgid "coverage modifications must be positive" msgstr "les modifications de couverture doivent être positives" msgid "deductible must be smaller than the limit" msgstr "la franchise doit être inférieure à la limite" msgid "coinsurance must be between 0 and 1" msgstr "le facteur de coassurance doit être entre 0 et 1" msgid "%s must be supplied" msgstr "%s doit être fourni" msgid "cdf" msgstr "cdf" msgid "%s required with method %s" msgstr "%s requis pour la méthode %s" msgid "lev" msgstr "lev" msgid "unbiased" msgstr "unbiased" msgid "%s must be positive" msgstr "%s doit être positif" msgid "order" msgstr "order" msgid "%s not used when %s is specified" msgstr "%s non utilisé quand %s est fourni" msgid "nclass" msgstr "nclass" msgid "breaks" msgstr "breaks" msgid "%s ignored when %s is specified" msgstr "%s ignoré quand %s est fourni" msgid "group" msgstr "group" msgid "invalid number of group boundaries and frequencies" msgstr "nombre de bornes de groupe et de fréquences incorrect" msgid "missing frequencies replaced by zeros" msgstr "fréquences manquantes remplacées par des zéros" msgid "" "missing values are not in the same positions in 'weights' and in 'ratios'" msgstr "" "les données manquantes ne sont pas aux mêmes positions dans les poids et " "dans les ratios" msgid "there must be at least 
two nodes at every level" msgstr "il doit y avoir au moins deux noeuds à chaque niveau" msgid "invalid level number" msgstr "numéro de niveau incorrect" msgid "infinite group boundaries" msgstr "bornes de groupe infinies" msgid "%s is an alias for %s, however they differ." msgstr "%s est un alias pour %s, cependant ils diffèrent." msgid "probability" msgstr "probability" msgid "!freq" msgstr "!freq" msgid "%s must be a named list" msgstr "%s doit être une liste nommée" msgid "start" msgstr "start" msgid "%s must be supplied as a function" msgstr "%s doit être fourni en tant que fonction" msgid "fun" msgstr "fun" msgid "%s must be a numeric vector or an object of class %s" msgstr "%s doit être un vecteur numérique ou un objet de classe %s" msgid "grouped.data" msgstr "grouped.data" msgid "%s specifies names which are not arguments to %s" msgstr "%s contient des noms qui ne sont pas des arguments de %s" msgid "%s measure requires an object of class %s" msgstr "la mesure %s requiert un objet de classe %s" msgid "chi-square" msgstr "chi-square" msgid "frequency must be larger than 0 in all groups" msgstr "la fréquence doit être supérieure à 0 dans tous les groupes" msgid "LAS" msgstr "LAS" msgid "optimization failed" msgstr "l'optimisation a échoué" msgid "%s has many elements: only the first used" msgstr "%s contient plusieurs éléments: seul le premier est utilisé" msgid "p0" msgstr "p0" msgid "%s must be a valid probability (between 0 and 1)" msgstr "%s doit être une probabilité (entre 0 et 1)" msgid "value of %s ignored with a zero-truncated distribution" msgstr "valeur de %s ignorée pour une distribution zéro tronquée" msgid "value of %s missing" msgstr "valeur de %s manquante" msgid "lambda" msgstr "lambda" msgid "value of %s or %s missing" msgstr "valeur de %s ou %s manquante" msgid "prob" msgstr "prob" msgid "frequency distribution not in the (a, b, 0) or (a, b, 1) families" msgstr "" "la distribution de fréquence ne fait pas partie des familles (a, b, 0) ou " 
"(a, b, 1)" msgid "Pr[S = 0] is numerically equal to 0; impossible to start the recursion" msgstr "" "valeur de Pr[S = 0] numériquement nulle; impossible de démarrer le calcul " "récursif" msgid "nodes" msgstr "nodes" msgid "level names different in %s, %s and %s" msgstr "noms de niveaux différents dans %s, %s et %s" msgid "one of %s or %s must be non-NULL" msgstr "un de %s ou %s doit ne pas être NULL" msgid "nothing to do" msgstr "rien à faire" msgid "invalid %s specification" msgstr "valeur de %s incorrecte" msgid "by" msgstr "by" msgid "invalid first argument %s" msgstr "premier argument %s incorrect" msgid "n" msgstr "n" msgid "invalid values in %s" msgstr "valeurs incorrectes dans %s" msgid "no positive probabilities" msgstr "aucune probabilité positive" msgid "invalid third argument %s" msgstr "troisième argument %s incorrect" msgid "models" msgstr "models" msgid "par.claims" msgstr "par.claims" msgid "par.wait" msgstr "par.wait" msgid "parameters %s missing in %s" msgstr "paramètres %s manquants dans %s" msgid "," msgstr "," msgid "parameter %s missing in %s" msgstr "paramètre %s manquant dans %s" msgid "parameter %s or %s missing in %s" msgstr "paramètre %s ou %s manquant dans %s" msgid "rates" msgstr "rates" msgid "invalid parameters in %s" msgstr "paramètres incorrects dans %s" msgid "%s must be a vector or a matrix" msgstr "%s doit être un vecteur ou une matrice" actuar/po/R-it.po0000644000176200001440000002271415147745722013346 0ustar liggesusers# Italian translation for actuar package # Copyright (C) 2022 Daniele Medri # This file is distributed under the same license as the actuar package. # Daniele Medri , 2022. 
# msgid "" msgstr "" "Project-Id-Version: actuar 2.0-0\n" "Report-Msgid-Bugs-To: bugs@r-project.org\n" "POT-Creation-Date: 2023-11-07 14:41\n" "PO-Revision-Date: 2023-11-07 14:46-0500\n" "Last-Translator: Daniele Medri \n" "Language-Team: Daniele Medri \n" "Language: it\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "X-Generator: Poedit 2.4.2\n" msgid "rows extracted in increasing order" msgstr "righe estratte in ordine crescente" msgid "impossible to replace boundaries and frequencies simultaneously" msgstr "non è possibile sostituire estremi e frequenze contemporaneamente" msgid "only logical matrix subscripts are allowed in replacement" msgstr "in sostituzione sono consentiti solo pedici di matrice logica" msgid "need 0, 1, or 2 subscripts" msgstr "richiede 0, 1 o due indici" msgid "one of %s or %s is needed" msgstr "richiesto uno di %s o %s" msgid "mgf.claim" msgstr "mgf.claim" msgid "h" msgstr "h" msgid "%s must be a function or an expression containing %s" msgstr "%s dev'essere una funzione o un'espressione contenente %s" msgid "x" msgstr "x" msgid "mgf.wait" msgstr "mgf.wait" msgid "%s must be a function or an expression containing %s and %s" msgstr "%s dev'essere una funzione o un'espressione contenente %s e %s" msgid "y" msgstr "y" msgid "%s must be a function when using reinsurance" msgstr "%s dev'essere una funzione quando si utilizza la riassicurazione" msgid "premium.rate" msgstr "premium.rate" msgid "%s must supply the mean and variance of the distribution" msgstr "%s deve fornire la media e la varianza della distribuzione" msgid "moments" msgstr "moments" msgid "%s must supply the mean, variance and skewness of the distribution" msgstr "" "%s deve fornire la media, la varianza e l'asimmetria della distribuzione" msgid "%s must supply the number of simulations" msgstr "%s deve indicare il numero di simulazioni" msgid "nb.simul" msgstr "nb.simul" msgid "expressions in %s and %s must be named" 
msgstr "le espressioni in %s e %s devono essere indicate" msgid "model.freq" msgstr "model.freq" msgid "model.sev" msgstr "model.sev" msgid "%s must be a vector of probabilities" msgstr "%s dev'essere un vettore di probabilità" msgid "frequency distribution must be supplied as a character string" msgstr "" "la distribuzione di frequenza devono essere passate come una stringa " "carattere" msgid "internal error" msgstr "errore interno" msgid "function not defined for approximating distributions" msgstr "funzione non definita per approssimare distribuzioni" msgid "lower bound of the likelihood missing" msgstr "estremo inferiore mancante per la verosimiglianza" msgid "one of the Gamma prior parameter %s, %s or %s missing" msgstr "manca uno dei parametri Gamma a priori tra %s, %s o %s" msgid "shape" msgstr "shape" msgid "rate" msgstr "rate" msgid "scale" msgstr "scale" msgid "one of the Beta prior parameter %s or %s missing" msgstr "manca uno dei parametri Beta a priori tra %s o %s" msgid "shape1" msgstr "shape1" msgid "shape2" msgstr "shape2" msgid "parameter %s of the likelihood missing" msgstr "parametro %s mancante per la verosimiglianza" msgid "size" msgstr "size" msgid "shape.lik" msgstr "shape.lik" msgid "sd.lik" msgstr "sd.lik" msgid "unsupported likelihood" msgstr "verosimiglianza non supportata" msgid "missing ratios not allowed when weights are not supplied" msgstr "non sono ammessi rapporti mancanti quando i pesi non sono indicati" msgid "there must be at least one node with more than one period of experience" msgstr "dev'esserci almeno un nodo con più di un periodo di esperienza" msgid "there must be more than one node" msgstr "dev'esserci più di un nodo" msgid "missing values are not in the same positions in %s and in %s" msgstr "i valori mancanti non sono nelle medesime posizioni in %s e in %s" msgid "weights" msgstr "weights" msgid "ratios" msgstr "ratios" msgid "no available data to fit model" msgstr "non ci sono abbastanza dati per stimare il modello" 
msgid "maximum number of iterations reached before obtaining convergence" msgstr "raggiunto il numero massimo di iterazioni prima della convergenza" msgid "unsupported interactions in %s" msgstr "interazioni non supportate in %s" msgid "formula" msgstr "formula" msgid "hierarchical regression models not supported" msgstr "modelli di regressione gerarchica non supportati" msgid "ratios have to be supplied if weights are" msgstr "i rapporti devono essere passati se i pesi sono" msgid "empty regression model; fitting with Buhlmann-Straub's model" msgstr "modello di regressione vuoto; stima con il modello Buhlmann-Straub" msgid "invalid level name" msgstr "nome livello non valido" msgid "coverage modifications must be positive" msgstr "le modifiche alla copertura devono essere positive" msgid "deductible must be smaller than the limit" msgstr "deductible dev'essere più piccolo del limite" msgid "coinsurance must be between 0 and 1" msgstr "coinsurance dev'essere tra 0 e 1" msgid "%s must be supplied" msgstr "%s dev'essere passata" msgid "cdf" msgstr "cdf" msgid "%s required with method %s" msgstr "%s richiesto con il metodo %s" msgid "lev" msgstr "lev" msgid "unbiased" msgstr "unbiased" msgid "%s must be positive" msgstr "%s dev'essere positivo" msgid "order" msgstr "order" msgid "%s not used when %s is specified" msgstr "%s non viene usata quando viene specificato %s" msgid "nclass" msgstr "nclass" msgid "breaks" msgstr "breaks" msgid "%s ignored when %s is specified" msgstr "%s ignorato quando %s è presente" msgid "group" msgstr "group" msgid "invalid number of group boundaries and frequencies" msgstr "numero di estremi di gruppo e frequenze non valido" msgid "missing frequencies replaced by zeros" msgstr "frequenze mancanti sostituite con zero" msgid "" "missing values are not in the same positions in 'weights' and in 'ratios'" msgstr "" "i valori mancanti non sono nelle medesime posizioni in 'weights' e in " "'ratios'" msgid "there must be at least two nodes at 
every level" msgstr "devono esserci almeno due nodi in ogni livello" msgid "invalid level number" msgstr "numero livello non valido" msgid "infinite group boundaries" msgstr "estremi di gruppo non finiti" msgid "%s is an alias for %s, however they differ." msgstr "%s è un alisa per %s, comunque sono differenti." msgid "probability" msgstr "probability" msgid "!freq" msgstr "!freq" msgid "%s must be a named list" msgstr "%s dev'essere una lista nominata" msgid "start" msgstr "start" msgid "%s must be supplied as a function" msgstr "%s dev'essere passata come una funzione" msgid "fun" msgstr "fun" msgid "%s must be a numeric vector or an object of class %s" msgstr "%s dev'essere un vettore numerico o un oggetto di classe %s" msgid "grouped.data" msgstr "grouped.data" msgid "%s specifies names which are not arguments to %s" msgstr "%s specifica nomi che non sono argomenti per %s" msgid "%s measure requires an object of class %s" msgstr "la misura %s richiede un oggetto di classe %s" msgid "chi-square" msgstr "chi-square" msgid "frequency must be larger than 0 in all groups" msgstr "la frequenza dev'essere più grande di 0 in tutti i gruppi" msgid "LAS" msgstr "LAS" msgid "optimization failed" msgstr "ottimizzazione fallita" msgid "%s has many elements: only the first used" msgstr "%s ha molti elementi: solo il primo è utilizzato" msgid "p0" msgstr "p0" msgid "%s must be a valid probability (between 0 and 1)" msgstr "%s dev'essere una probabilità valida (tra 0 e 1)" msgid "value of %s ignored with a zero-truncated distribution" msgstr "valore di %s ignorato con una distribuzione troncata zero" msgid "value of %s missing" msgstr "valore di %s mancante" msgid "lambda" msgstr "lambda" msgid "value of %s or %s missing" msgstr "valore di %s o %s mancante" msgid "prob" msgstr "prob" msgid "frequency distribution not in the (a, b, 0) or (a, b, 1) families" msgstr "distribuzione di frequenza non nelle famiglie (a, b, 0) o (a, b, 1)" msgid "Pr[S = 0] is numerically equal to 0; 
impossible to start the recursion" msgstr "" "Pr[S = 0] è numericamente uguale a 0; non è possibile avviare la ricorsione" msgid "nodes" msgstr "nodes" msgid "level names different in %s, %s and %s" msgstr "nomi livello differenti in %s, %s e %s" msgid "one of %s or %s must be non-NULL" msgstr "uno di %s o %s non dev'essere NULL" msgid "nothing to do" msgstr "niente da fare" msgid "invalid %s specification" msgstr "specificazione di %s non valida" msgid "by" msgstr "by" msgid "invalid first argument %s" msgstr "primo argomento %s non valido" msgid "n" msgstr "n" msgid "invalid values in %s" msgstr "valori non validi in %s" msgid "no positive probabilities" msgstr "nessuna probabilità positiva" msgid "invalid third argument %s" msgstr "terzo argomento %s non valido" msgid "models" msgstr "models" msgid "par.claims" msgstr "par.claims" msgid "par.wait" msgstr "par.wait" msgid "parameters %s missing in %s" msgstr "parametri %s mancanti in %s" msgid "," msgstr "," msgid "parameter %s missing in %s" msgstr "parametri %s mancanti in %s" msgid "parameter %s or %s missing in %s" msgstr "parametri %s o %s mancanti in %s" msgid "rates" msgstr "rates" msgid "invalid parameters in %s" msgstr "parametri non validi in %s" msgid "%s must be a vector or a matrix" msgstr "%s dev'essere un vettore numerico o una matrice" actuar/po/fr.po0000644000176200001440000001151315147745722013135 0ustar liggesusers# French translations for actuar package # Traduction française du package actuar. # Copyright (C) 2007 Vincent Goulet # This file is distributed under the same license as the actuar package. # Vincent Goulet , 2007. 
# msgid "" msgstr "" "Project-Id-Version: actuar 1.1-7\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2022-10-27 15:25-0400\n" "PO-Revision-Date: 2020-06-03 12:33-0400\n" "Last-Translator: Vincent Goulet \n" "Language-Team: Vincent Goulet \n" "Language: fr\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" #: betaint.c:143 dpq.c:105 dpq.c:241 dpq.c:497 dpq.c:697 dpq.c:875 dpq.c:1055 #: dpqphtype.c:57 random.c:134 random.c:141 random.c:235 random.c:242 #: random.c:353 random.c:360 random.c:467 random.c:474 random.c:579 #: random.c:586 randomphtype.c:74 randomphtype.c:81 msgid "invalid arguments" msgstr "arguments incorrects" #: dpq.c:208 msgid "internal error in actuar_do_dpq1" msgstr "erreur interne dans actuar_do_dpq1" #: dpq.c:463 msgid "internal error in actuar_do_dpq2" msgstr "erreur interne dans actuar_do_dpq2" #: dpq.c:659 msgid "internal error in actuar_do_dpq3" msgstr "erreur interne dans actuar_do_dpq3" #: dpq.c:838 msgid "internal error in actuar_do_dpq4" msgstr "erreur interne dans actuar_do_dpq4" #: dpq.c:1014 msgid "internal error in actuar_do_dpq5" msgstr "erreur interne dans actuar_do_dpq5" #: dpq.c:1160 msgid "internal error in actuar_do_dpq6" msgstr "erreur interne dans actuar_do_dpq6" #: dpqphtype.c:177 msgid "internal error in actuar_do_dpqphtype2" msgstr "erreur interne dans actuar_do_dpqphtype2" #: fpareto.c:186 fpareto.c:246 pareto2.c:139 pareto2.c:193 pareto3.c:144 #: pareto3.c:199 pareto4.c:160 pareto4.c:219 #, c-format msgid "'order' (%.2f) must be integer, rounded to %.0f" msgstr "'order' (%.2f) doit être entier, arrondi à %.0f" #: hierarc.c:100 invgauss.c:209 msgid "maximum number of iterations reached before obtaining convergence" msgstr "nombre d'itérations maximal atteint avant obtention de la convergence" #: invgauss.c:150 msgid "maximum number of iterations must be at least 1" msgstr "le nombre d'itérations maximal doit être au 
moins 1" #: invpareto.c:185 msgid "integration failed" msgstr "l'intégration a échoué" #: panjer.c:71 panjer.c:114 msgid "" "maximum number of recursions reached before the probability distribution was " "complete" msgstr "nombre de récursions maximal atteint avant obtention de la convergence" #: random.c:81 msgid "NAs produced" msgstr "production de NA" #: random.c:171 msgid "internal error in actuar_do_random1" msgstr "erreur interne dans actuar_do_random1" #: random.c:287 msgid "internal error in actuar_do_random2" msgstr "erreur interne dans actuar_do_random2" #: random.c:399 msgid "internal error in actuar_do_random3" msgstr "erreur interne dans actuar_do_random3" #: random.c:509 msgid "internal error in actuar_do_random4" msgstr "erreur interne dans actuar_do_random4" #: random.c:621 msgid "internal error in actuar_do_random5" msgstr "erreur interne dans actuar_do_random5" #: random.c:651 msgid "internal error in actuar_do_random" msgstr "erreur interne dans actuar_do_random" #: randomphtype.c:101 msgid "non-square sub-intensity matrix" msgstr "matrice de sous-intensité non carrée" #: randomphtype.c:104 msgid "non-conformable arguments" msgstr "arguments non conformes" #: randomphtype.c:123 msgid "internal error in actuar_do_randomphtype2" msgstr "erreur interne dans actuar_do_randomphtype2" #: randomphtype.c:150 msgid "internal error in actuar_do_randomphtype" msgstr "erreur interne dans actuar_do_randomphtype" #: util.c:106 #, c-format msgid "LAPACK routine dgebal returned info code %d when permuting" msgstr "" "la procédure LAPACK dgebal a produit le code d'erreur %d lors de la " "permutation" #: util.c:110 #, c-format msgid "LAPACK routine dgebal returned info code %d when scaling" msgstr "" "la procédure LAPACK dgebal a produit le code d'erreur %d lors de la mise à " "l'échelle" #: util.c:157 #, c-format msgid "LAPACK routine dgetrf returned info code %d" msgstr "la procédure LAPACK dgetrf a produit le code d'erreur %d" #: util.c:160 #, c-format msgid 
"LAPACK routine dgetrs returned info code %d" msgstr "la procédure LAPACK dgetrs a produit le code d'erreur %d" #: util.c:266 msgid "'A' is 0-diml" msgstr "'A' est de dimension nulle" #: util.c:268 msgid "no right-hand side in 'B'" msgstr "aucun membre de droite dans 'B'" #: util.c:279 #, c-format msgid "argument %d of Lapack routine dgesv had invalid value" msgstr "valeur incorrecte pour l'argument %d du sous-programme dgesv de Lapack" #: util.c:282 msgid "Lapack routine dgesv: system is exactly singular" msgstr "sous-programme Lapack dgesv: le système est exactement singulier" actuar/po/it.po0000644000176200001440000001143215147745722013142 0ustar liggesusers# Italian translation for actuar package # Copyright (C) 2022 Daniele Medri # This file is distributed under the same license as the actuar package. # Daniele Medri , 2022. # msgid "" msgstr "" "Project-Id-Version: actuar 1.1-7\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2022-10-27 15:25-0400\n" "PO-Revision-Date: 2022-04-13 11:12+0200\n" "Last-Translator: Daniele Medri \n" "Language-Team: Daniele Medri \n" "Language: it_IT\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "X-Generator: Poedit 2.4.2\n" #: betaint.c:143 dpq.c:105 dpq.c:241 dpq.c:497 dpq.c:697 dpq.c:875 dpq.c:1055 #: dpqphtype.c:57 random.c:134 random.c:141 random.c:235 random.c:242 #: random.c:353 random.c:360 random.c:467 random.c:474 random.c:579 #: random.c:586 randomphtype.c:74 randomphtype.c:81 msgid "invalid arguments" msgstr "argomenti non validi" #: dpq.c:208 msgid "internal error in actuar_do_dpq1" msgstr "errore interno in actuar_do_dpq1" #: dpq.c:463 msgid "internal error in actuar_do_dpq2" msgstr "errore interno in actuar_do_dpq2" #: dpq.c:659 msgid "internal error in actuar_do_dpq3" msgstr "errore interno in actuar_do_dpq3" #: dpq.c:838 msgid "internal error in actuar_do_dpq4" msgstr "errore interno in actuar_do_dpq4" #: 
dpq.c:1014 msgid "internal error in actuar_do_dpq5" msgstr "errore interno in actuar_do_dpq5" #: dpq.c:1160 msgid "internal error in actuar_do_dpq6" msgstr "errore interno in actuar_do_dpq6" #: dpqphtype.c:177 msgid "internal error in actuar_do_dpqphtype2" msgstr "errore interno in actuar_do_dpqphtype2" #: fpareto.c:186 fpareto.c:246 pareto2.c:139 pareto2.c:193 pareto3.c:144 #: pareto3.c:199 pareto4.c:160 pareto4.c:219 #, c-format msgid "'order' (%.2f) must be integer, rounded to %.0f" msgstr "'order' (%.2f) dev'essere un intero, arrotondato a %.0f" #: hierarc.c:100 invgauss.c:209 msgid "maximum number of iterations reached before obtaining convergence" msgstr "raggiunto il numero massimo di iterazioni prima della convergenza" #: invgauss.c:150 msgid "maximum number of iterations must be at least 1" msgstr "il numero massimo di iterazioni dev'essere almeno 1" #: invpareto.c:185 msgid "integration failed" msgstr "integrazione fallita" #: panjer.c:71 panjer.c:114 msgid "" "maximum number of recursions reached before the probability distribution was " "complete" msgstr "" "raggiunto il numero massimo di ricorsioni prima che la distribuzione di " "probabilità fosse completata" #: random.c:81 msgid "NAs produced" msgstr "Generati valori NA" #: random.c:171 msgid "internal error in actuar_do_random1" msgstr "errore interno in actuar_do_random1" #: random.c:287 msgid "internal error in actuar_do_random2" msgstr "errore interno in actuar_do_random2" #: random.c:399 msgid "internal error in actuar_do_random3" msgstr "errore interno in actuar_do_random3" #: random.c:509 msgid "internal error in actuar_do_random4" msgstr "errore interno in actuar_do_random4" #: random.c:621 msgid "internal error in actuar_do_random5" msgstr "errore interno in actuar_do_random5" #: random.c:651 msgid "internal error in actuar_do_random" msgstr "errore interno in actuar_do_random" #: randomphtype.c:101 msgid "non-square sub-intensity matrix" msgstr "matrice non quadrata" #: randomphtype.c:104 
msgid "non-conformable arguments" msgstr "gli argomenti non sono compatibili" #: randomphtype.c:123 msgid "internal error in actuar_do_randomphtype2" msgstr "errore interno in actuar_do_randomphtype2" #: randomphtype.c:150 msgid "internal error in actuar_do_randomphtype" msgstr "errore interno in actuar_do_randomphtype" #: util.c:106 #, c-format msgid "LAPACK routine dgebal returned info code %d when permuting" msgstr "" "La routine dgebal di LAPACK ha restituito il codice informativo %d durante " "la permutazione" #: util.c:110 #, c-format msgid "LAPACK routine dgebal returned info code %d when scaling" msgstr "" "La routine dgebal di LAPACK ha restituito il codice informativo %d durante " "lo scaling" #: util.c:157 #, c-format msgid "LAPACK routine dgetrf returned info code %d" msgstr "La routine dgetrf di LAPACK ha restituito il codice informativo %d" #: util.c:160 #, c-format msgid "LAPACK routine dgetrs returned info code %d" msgstr "La routine dgetrs di LAPACK ha restituito il codice informativo %d" #: util.c:266 msgid "'A' is 0-diml" msgstr "'A' è 0-diml" #: util.c:268 msgid "no right-hand side in 'B'" msgstr "nessun membro di destra in 'B'" #: util.c:279 #, c-format msgid "argument %d of Lapack routine dgesv had invalid value" msgstr "l'argomento %d della routine dgesv di Lapack ha un valore non valido" #: util.c:282 msgid "Lapack routine dgesv: system is exactly singular" msgstr "La routine dgesv di Lapack: il sistema è esattamente singolare" actuar/R/0000755000176200001440000000000015147745722011750 5ustar liggesusersactuar/R/quantile.aggregateDist.R0000644000176200001440000000423215147745722016467 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Quantiles for objects of class 'aggregateDist' ### ### AUTHORS: Louis-Philippe Pouliot, ### Vincent Goulet quantile.aggregateDist <- function(x, probs = c(0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 0.995), smooth = FALSE, names = TRUE, ...) { chkDots(...) 
# method does not use '...' label <- comment(x) ## The Normal and Normal Power approximations are the only ## continuous distributions of class 'aggregateDist'. They are ## therefore treated differently, using the 'base' quantile ## function qnorm(). if (label == "Normal approximation") res <- qnorm(probs, get("mean", environment(x)), sqrt(get("variance", environment(x)))) else if (label == "Normal Power approximation") { m <- get("mean", envir = environment(x)) sd <- sqrt(get("variance", envir = environment(x))) sk <- get("skewness", envir = environment(x)) ## Calling qnorm() and inverting the Normal Power 'standardization' q <- qnorm(probs) res <- ifelse(probs <= 0.5, NA, m + sd * (q + sk * (q^2 - 1)/6)) } else { ## An empirical and discrete approach is used for ## 'aggregateDist' objects obtained from methods other than ## Normal and Normal Power. y <- get("y", environment(x)) x <- get("x", environment(x)) ## Create the inverse function of either the cdf or the ogive. fun <- if (smooth) # ogive approxfun(y, x, yleft = 0, yright = max(x), method = "linear", ties = "ordered") else # cdf approxfun(y, x, yleft = 0, yright = max(x), method = "constant", f = 1, ties = "ordered") ## Quantiles res <- fun(probs) } if (names) { dig <- max(2, getOption("digits")) names(res) <- formatC(paste(100 * probs, "%", sep = ""), format = "fg", width = 1, digits = dig) } res } actuar/R/panjer.R0000644000176200001440000001345015147745722013355 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Panjer recursion formula to compute the approximate aggregate ### claim amount distribution of a portfolio over a period. ### ### AUTHORS: Vincent Goulet , ### Sebastien Auclair, Louis-Philippe Pouliot and Tommy Ouellet panjer <- function(fx, dist, p0 = NULL, x.scale = 1, ..., convolve = 0, tol = sqrt(.Machine$double.eps), maxit = 500, echo = FALSE) { ## Express 'tol' as a value close to 1. 
If needed, modify the ## accuracy level so that the user specified level is attained ## *after* the additional convolutions (without getting too high). tol <- if (convolve > 0) min((0.5 - tol + 0.5)^(0.5 ^ convolve), 0.5 - sqrt(.Machine$double.eps) + 0.5) else 0.5 - tol + 0.5 ## Check if p0 is a valid probability. if (!is.null(p0)) { if (length(p0) > 1L) { p0 <- p0[1L] warning(sprintf("%s has many elements: only the first used", sQuote("p0"))) } if ((p0 < 0) || (p0 > 1)) stop(sprintf("%s must be a valid probability (between 0 and 1)", sQuote("p0"))) } ## Treat trivial case where 'p0 == 1' and hence F_S(0) = 1. if (identical(p0, 1)) { FUN <- approxfun(0, 1, method = "constant", yleft = 0, yright = 1, f = 0) class(FUN) <- c("ecdf", "stepfun", class(FUN)) assign("fs", 1, envir = environment(FUN)) assign("x.scale", x.scale, envir = environment(FUN)) return(FUN) } ## The call to .External below requires 'p1' to be initialized. p1 <- 0 ## Argument '...' should contain the values of the parameters of ## 'dist'. par <- list(...) ## Distributions are expressed as a member of the (a, b, 0) or (a, ## b, 1) families of distributions. Assign parameters 'a' and 'b' ## depending of the chosen distribution and compute f_S(0) in ## every case, and p1 if p0 is specified in argument. ## ## At this point, either p0 is NULL or 0 <= p0 < 1. 
if (startsWith(dist, "zero-truncated")) { if (!(is.null(p0) || identical(p0, 0))) warning(sprintf("value of %s ignored with a zero-truncated distribution", sQuote("p0"))) dist <- sub("zero-truncated ", "", dist) # drop "zero truncated" prefix p0 <- 0 } if (startsWith(dist, "zero-modified")) dist <- sub("zero-modified ", "", dist) # drop "zero modified" prefix if (dist == "geometric") { dist <- "negative binomial" par$size <- 1 } if (dist == "poisson") { if (!"lambda" %in% names(par)) stop(sprintf("value of %s missing", sQuote("lambda"))) lambda <- par$lambda a <- 0 b <- lambda if (is.null(p0)) # standard Poisson fs0 <- exp(lambda * (fx[1L] - 1)) else # 0 <= p0 < 1; zero-truncated/modified Poisson { fs0 <- p0 + (1 - p0) * pgfztpois(fx[1L], lambda) p1 <- (1 - p0) * dztpois(1, lambda) } } else if (dist == "negative binomial") { if (!all(c("prob", "size") %in% names(par))) stop(sprintf("value of %s or %s missing", sQuote("prob"), sQuote("size"))) r <- par$size p <- par$prob a <- 1 - p b <- (r - 1) * a if (is.null(p0)) # standard negative binomial fs0 <- exp(-r * log1p(-a/p * (fx[1L] - 1))) else # 0 <= p0 < 1; zero-truncated/modified neg. 
binomial { fs0 <- p0 + (1 - p0) * pgfztnbinom(fx[1L], r, p) p1 <- (1 - p0) * dztnbinom(1, r, p) } } else if (dist == "binomial") { if (!all(c("prob", "size") %in% names(par))) stop(sprintf("value of %s or %s missing", sQuote("prob"), sQuote("size"))) n <- par$size p <- par$prob a <- p/(p - 1) # equivalent to -p/(1 - p) b <- -(n + 1) * a if (is.null(p0)) # standard binomial fs0 <- exp(n * log1p(p * (fx[1L] - 1))) else # 0 <= p0 < 1; zero-truncated/modified binomial { fs0 <- p0 + (1 - p0) * pgfztbinom(fx[1L], n, p) p1 <- (1 - p0) * dztbinom(1, n, p) } } else if (dist == "logarithmic") { if (!"prob" %in% names(par)) stop(sprintf("value of %s missing", sQuote("prob"))) a <- par$prob b <- -a if (is.null(p0) || identical(p0, 0)) # standard logarithmic fs0 <- pgflogarithmic(fx[1L], a) else # 0 < p0 < 1; zero-modified logarithmic { fs0 <- p0 + (1 - p0) * pgflogarithmic(fx[1L], a) p1 <- (1 - p0) * dlogarithmic(1, a) } } else stop("frequency distribution not in the (a, b, 0) or (a, b, 1) families") ## If fs0 is equal to zero, the recursion will not start. There is ## no provision to automatically cope with this situation in the ## current version of this function. Just issue an error message ## and let the user do the work by hand. if (identical(fs0, 0)) stop("Pr[S = 0] is numerically equal to 0; impossible to start the recursion") ## Recursive calculations in C. 
fs <- .External(C_actuar_do_panjer, p0, p1, fs0, fx, a, b, convolve, tol, maxit, echo) FUN <- approxfun((0:(length(fs) - 1)) * x.scale, pmin(cumsum(fs), 1), method = "constant", yleft = 0, yright = 1, f = 0, ties = "ordered") class(FUN) <- c("ecdf", "stepfun", class(FUN)) assign("fs", fs, envir = environment(FUN)) assign("x.scale", x.scale, envir = environment(FUN)) FUN } actuar/R/Pareto4.R0000644000176200001440000000324415147745722013414 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}pareto4 functions to compute ### characteristics of the Pareto (type) IV distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = 1 - (1/(1 + v))^shape1, x > min, ### ### where v = ((x - min)/scale)^shape2. ### ### See Arnold, B. C. (2015), Pareto Distributions, Second Edition, ### CRC Press. ### ### AUTHOR: Vincent Goulet dpareto4 <- function(x, min, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dpareto4", x, min, shape1, shape2, scale, log) ppareto4 <- function(q, min, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "ppareto4", q, min, shape1, shape2, scale, lower.tail, log.p) qpareto4 <- function(p, min, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qpareto4", p, min, shape1, shape2, scale, lower.tail, log.p) rpareto4 <- function(n, min, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rpareto4", n, min, shape1, shape2, scale) mpareto4 <- function(order, min, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mpareto4", order, min, shape1, shape2, scale, FALSE) levpareto4 <- function(limit, min, shape1, shape2, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levpareto4", limit, min, shape1, shape2, scale, order, FALSE) 
actuar/R/TransformedGamma.R0000644000176200001440000000325715147745722015331 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}trgamma functions to compute ### characteristics of the Transformed Gamma distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = pgamma((x/scale)^shape2, shape1, scale = 1), x > 0 ### ### or, equivalently, ### ### Pr[X <= x] = pgamma(x^shape2, shape1, scale = scale^shape2), x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dtrgamma <- function (x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dtrgamma", x, shape1, shape2, scale, log) ptrgamma <- function(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "ptrgamma", q, shape1, shape2, scale, lower.tail, log.p) qtrgamma <- function(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qtrgamma", p, shape1, shape2, scale, lower.tail, log.p) rtrgamma <- function(n, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rtrgamma", n, shape1, shape2, scale) mtrgamma <- function(order, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mtrgamma", order, shape1, shape2, scale, FALSE) levtrgamma <- function(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levtrgamma", limit, shape1, shape2, scale, order, FALSE) actuar/R/mde.R0000644000176200001440000001115215147745722012640 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Minimum distance estimation. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet mde <- function(x, fun, start, measure = c("CvM", "chi-square", "LAS"), weights = NULL, ...) { ## General form of the function to minimize. 
myfn <- function(parm, x, weights, ...) { y <- G(parm, x, ...) - Gn(x) drop(crossprod(weights * y, y)) } ## Extract call; used to build the call to optim(). Call <- match.call(expand.dots = TRUE) ## Argument checking if (missing(start) || !is.list(start)) stop(sprintf("%s must be a named list", sQuote("start"))) if (missing(fun) || !(is.function(fun))) stop(sprintf("%s must be supplied as a function", sQuote("fun"))) grouped <- inherits(x, "grouped.data") if (!(is.numeric(x) || grouped)) stop(sprintf("%s must be a numeric vector or an object of class %s", sQuote("x"), dQuote("grouped.data"))) ## Make sure that any argument of 'fun' specified in '...' is held ## fixed. dots <- names(list(...)) dots <- dots[!is.element(dots, c("upper", "lower"))] start <- start[!is.element(names(start), dots)] ## Adapt 'fun' to our needs; taken from MASS::fitdistr. nm <- names(start) f <- formals(fun) args <- names(f) m <- match(nm, args) if (any(is.na(m))) stop(sprintf("%s specifies names which are not arguments to %s", sQuote("start"), sQuote("fun"))) formals(fun) <- c(f[c(1, m)], f[-c(1, m)]) # reorder arguments fn <- function(parm, x, ...) fun(x, parm, ...) if ((l <- length(nm)) > 1) body(fn) <- parse(text = paste("fun(x,", paste("parm[", 1:l, "]", collapse = ", "), ")")) measure <- match.arg(measure) ## Cramer-von Mises. Use the true and empirical cdf for individual ## data, or the true cdf and the ogive for grouped data. if (measure == "CvM") { G <- fn Gn <- if (grouped) ogive(x) else ecdf(x) if (is.null(weights)) weights <- 1 Call$x <- knots(Gn) Call$par <- start } ## Modified Chi-square. if (measure == "chi-square") { if (!grouped) stop(sprintf("%s measure requires an object of class %s", dQuote("chi-square"), dQuote("grouped.data"))) if (any((nj <- x[, 2]) == 0)) stop("frequency must be larger than 0 in all groups") og <- ogive(x) x <- knots(og) n <- sum(nj) G <- function(...) n * diff(fn(...)) Gn <- function(...) 
n * diff(og(...)) if (is.null(weights)) weights <- 1/nj Call$x <- x Call$par <- start } ## Layer average severity. if (measure == "LAS") { if (!grouped) stop(sprintf("%s measure requires an object of class %s", dQuote("LAS"), dQuote("grouped.data"))) e <- elev(x) x <- knots(e) G <- function(...) diff(fn(...)) Gn <- function(...) diff(e(...)) if (is.null(weights)) weights <- 1 Call$x <- x Call$par <- start } ## optim() call Call[[1]] <- as.name("optim") Call$fun <- Call$start <- Call$measure <- NULL Call$fn <- myfn Call$weights <- weights Call$hessian <- FALSE if (is.null(Call$method)) { if (any(c("lower", "upper") %in% names(Call))) Call$method <- "L-BFGS-B" else if (length(start) > 1) Call$method <- "BFGS" else Call$method <- "Nelder-Mead" } res <- eval(Call) ## Return result if (res$convergence > 0) stop("optimization failed") structure(list(estimate = res$par, distance = res$value), class = c("mde","list")) } print.mde <- function(x, digits = getOption("digits"), ...) { ans1 <- format(x$estimate, digits = digits) ans1 <- sapply(ans1, function(x) paste("", x)) nm1 <- names(ans1) nm1 <- paste(substring(" ", 1L, (nchar(ans1) - nchar(nm1)) %/% 2), nm1) nm1 <- paste(nm1, substring(" ", 1L, (nchar(ans1) - nchar(nm1)) %/% 2 + 1)) names(ans1) <- nm1 ans2 <- format(x$distance, digits = digits) ans2 <- sapply(ans2, function(x) paste("", x)) nm2 <- "distance" nm2 <- paste(substring(" ", 1L, (nchar(ans2) - nchar(nm2)) %/% 2), nm2) nm2 <- paste(nm2, substring(" ", 1L, (nchar(ans2) - nchar(nm2)) %/% 2)) names(ans2) <- nm2 print(ans1, quote = FALSE) cat("\n") print(ans2, quote = FALSE) invisible(x) } actuar/R/InverseGaussian.R0000644000176200001440000000374715147745722015214 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev,mgf}invgauss functions to compute ### characteristics of the Inverse Gaussian distribution. 
### ### Functions [dpq]invgauss rely on C implementations of functions of ### the same name in package statmod. See: ### ### Giner, G. and Smyth, G. K. (2016), "statmod: Probability ### Calculations for the Inverse Gaussian Distribution", R Journal, ### vol. 8, no 1, p. 339-351. ### https://journal.r-project.org/archive/2016-1/giner-smyth.pdf ### ### Chhikara, R. S. and Folk, T. L. (1989), The Inverse Gaussian ### Distribution: Theory, Methodology and Applications}, Decker. ### ### AUTHOR: Vincent Goulet dinvgauss <- function(x, mean, shape = 1, dispersion = 1/shape, log = FALSE) .External(C_actuar_do_dpq, "dinvgauss", x, mean, dispersion, log) pinvgauss <- function(q, mean, shape = 1, dispersion = 1/shape, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvgauss", q, mean, dispersion, lower.tail, log.p) qinvgauss <- function(p, mean, shape = 1, dispersion = 1/shape, lower.tail = TRUE, log.p = FALSE, tol = 1e-14, maxit = 100, echo = FALSE, trace = echo) .External(C_actuar_do_dpq, "qinvgauss", p, mean, dispersion, lower.tail, log.p, tol, maxit, trace) rinvgauss <- function(n, mean, shape = 1, dispersion = 1/shape) .External(C_actuar_do_random, "rinvgauss", n, mean, dispersion) minvgauss <- function(order, mean, shape = 1, dispersion = 1/shape) .External(C_actuar_do_dpq, "minvgauss", order, mean, dispersion, FALSE) levinvgauss <- function(limit, mean, shape = 1, dispersion = 1/shape, order = 1) .External(C_actuar_do_dpq, "levinvgauss", limit, mean, dispersion, order, FALSE) mgfinvgauss <- function(t, mean, shape = 1, dispersion = 1/shape, log = FALSE) .External(C_actuar_do_dpq, "mgfinvgauss", t, mean, dispersion, log) actuar/R/GeneralizedBeta.R0000644000176200001440000000334215147745722015122 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}genbeta functions to compute ### characteristics of the Generalized Beta distribution. 
The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = Pr[Y <= (x/scale)^shape3], 0 < x < scale, ### ### where Y has a Beta distribution with parameters shape1 and shape2. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHOR: Vincent Goulet dgenbeta <- function (x, shape1, shape2, shape3, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dgenbeta", x, shape1, shape2, shape3, scale, log) pgenbeta <- function (q, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pgenbeta", q, shape1, shape2, shape3, scale, lower.tail, log.p) qgenbeta <- function (p, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qgenbeta", p, shape1, shape2, shape3, scale, lower.tail, log.p) rgenbeta <- function (n, shape1, shape2, shape3, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rgenbeta", n, shape1, shape2, shape3, scale) mgenbeta <- function (order, shape1, shape2, shape3, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mgenbeta", order, shape1, shape2, shape3, scale, FALSE) levgenbeta <- function (limit, shape1, shape2, shape3, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levgenbeta", limit, shape1, shape2, shape3, scale, order, FALSE) actuar/R/ZeroModifiedGeometric.R0000644000176200001440000000145215147745722016314 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}zmgeom functions to compute ### characteristics of the Zero Modified Geometric distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHOR: Vincent Goulet dzmgeom <- function (x, prob, p0, log = FALSE) .External(C_actuar_do_dpq, "dzmgeom", x, prob, p0, log) pzmgeom <- function(q, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pzmgeom", q, prob, p0, lower.tail, log.p) qzmgeom <- function(p, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qzmgeom", p, prob, p0, lower.tail, log.p) rzmgeom <- function(n, prob, p0) .External(C_actuar_do_random, "rzmgeom", n, prob, p0) actuar/R/exact.R0000644000176200001440000000234015147745722013176 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Exact calculation of the aggregate claim amount distribution ### function by convolution. Requires a discrete distribution for ### claim amounts. ### ### AUTHORS: Vincent Goulet ### and Louis-Philippe Pouliot exact <- function(fx, pn, x.scale = 1) { ## Some useful lengths m <- length(fx) # 1 + maximum claim amount n <- length(pn) - 1 # maximum number of claims r <- n * m - n + 1 # maximum total amount of claims ## Initialization of the output vector fs <- rep(0, r) fs[1] <- pn[1] # Pr[N = 0] ## Convolutions fxc <- 1 for (i in 1:n) { pos <- seq_len(i * m - i + 1) fxc <- convolve(fx, rev(fxc), type = "open") fs[pos] <- fs[pos] + fxc * pn[i + 1] } FUN <- approxfun((0:(length(fs) - 1)) * x.scale, pmin(cumsum(fs), 1), method = "constant", yleft = 0, yright = 1, f = 0, ties = "ordered") class(FUN) <- c("ecdf", "stepfun", class(FUN)) assign("fs", fs, envir = environment(FUN)) assign("x.scale", x.scale, envir = environment(FUN)) FUN } actuar/R/LognormalMoments.R0000644000176200001440000000102415147745722015365 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {m,lev}lnorm functions. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet mlnorm <- function(order, meanlog = 0, sdlog = 1) .External(C_actuar_do_dpq, "mlnorm", order, meanlog, sdlog, FALSE) levlnorm <- function(limit, meanlog = 0, sdlog = 1, order = 1) .External(C_actuar_do_dpq, "levlnorm", limit, meanlog, sdlog, order, FALSE) actuar/R/SingleParameterPareto.R0000644000176200001440000000226315147745722016333 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}single-parameter pareto ### functions. The single-parameter Pareto distribution used in these ### functions has cumulative distribution function ### ### Pr[X <= x] = 1 - (min/x)^shape, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dpareto1 <- function (x, shape, min, log = FALSE) .External(C_actuar_do_dpq, "dpareto1", x, shape, min, log) ppareto1 <- function(q, shape, min, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "ppareto1", q, shape, min, lower.tail, log.p) qpareto1 <- function(p, shape, min, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qpareto1", p, shape, min, lower.tail, log.p) rpareto1 <- function(n, shape, min) .External(C_actuar_do_random, "rpareto1", n, shape, min) mpareto1 <- function(order, shape, min) .External(C_actuar_do_dpq, "mpareto1", order, shape, min, FALSE) levpareto1 <- function(limit, shape, min, order = 1) .External(C_actuar_do_dpq, "levpareto1", limit, shape, min, order, FALSE) actuar/R/var-methods.R0000644000176200001440000000267615147745722014337 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Variance and standard deviation ### ### See Klugman, Panjer & Willmot, Loss Models, Wiley, 1998. ### ### AUTHOR: Vincent Goulet ### Walter Garcia-Fontes ## New generics for functions of the stats package var <- function(x, ...) UseMethod("var") sd <- function(x, ...) 
UseMethod("sd") ## Default methods are stats::var and stats:sd var.default <- function(x, y = NULL, na.rm = FALSE, use, ...) stats::var(x, y = NULL, na.rm = na.rm, use) sd.default <- function(x, na.rm = FALSE, ...) stats::sd(x, na.rm = na.rm) ## Methods for grouped data var.grouped.data <- function(x, ...) { ## Get group boundaries cj <- eval(expression(cj), envir = environment(x)) ## Compute group midpoints midpoints <- cj[-length(cj)] + diff(cj)/2 ## Extract frequencies columns by dropping the boundaries column; ## convert to matrix for use in crossprod() x <- as.matrix(x[-1L]) ## Compute mean per column (avoiding a call to ## 'mean.grouped.data') that would redo most of the computations ## above. means <- drop(crossprod(x, midpoints))/colSums(x) ## Compute midpoints minus mean and square it midsquare <- (midpoints - means)^2 ## Compute mean per column drop(crossprod(x, midsquare))/(colSums(x) - 1) } sd.grouped.data <- function(x, ...) { ## Square root of variance drop(sqrt(var.grouped.data(x))) } actuar/R/cm.R0000644000176200001440000003311115147745722012471 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Main interface to credibility model fitting functions. ### ### AUTHORS: Louis-Philippe Pouliot, Tommy Ouellet, ### Vincent Goulet . cm <- function(formula, data, ratios, weights, subset, regformula = NULL, regdata, adj.intercept = FALSE, method = c("Buhlmann-Gisler", "Ohlsson", "iterative"), likelihood, ..., tol = sqrt(.Machine$double.eps), maxit = 100, echo = FALSE) { Call <- match.call(expand.dots = TRUE) ## Catch the pure bayesian special case. if (formula == "bayes") { if (missing(data) || length(data) == 0L) data <- NULL res <- bayes(data, likelihood, ...) class(res) <- c("cm", class(res)) attr(res, "call") <- Call return(res) } ## === MODEL ANALYSIS === ## ## Decompose the formula giving the portfolio structure. Attribute ## "order" gives the interaction level of each term in the ## formula. 
In hierarchical structures, each term should represent ## a different level, hence there should not be any duplicates in ## this attribute. The column names in 'data' containing the ## portfolio structure can be obtained from the rownames of the ## matrix in attribute "factors". ## ## Note that the very last level, the data, is not taken into ## account here. tf <- terms(formula) level.numbers <- attr(tf, "order") # level IDs level.names <- rownames(attr(tf, "factors")) # level names nlevels <- length(level.names) # number of levels ## Sanity checks ## ## 1. only hierarchical interactions are allowed in 'formula'; ## 2. hierarchical regression models are not supported. ## 3. if 'ratios' is missing, all columns of 'data' are taken to ## be ratios, so 'weights' should also be missing; ## if (any(duplicated(level.numbers))) stop(sprintf("unsupported interactions in %s", sQuote("formula"))) if (nlevels > 1 && !is.null(regformula)) stop("hierarchical regression models not supported") if (missing(ratios) & !missing(weights)) stop("ratios have to be supplied if weights are") ## === DATA EXTRACTION === ## ## 'data' is split into three matrices: one for the portfolio ## structure, one for the ratios and one for the weights. They are ## obtained via calls to subset() built from this function's ## call. That way, arguments 'ratios', 'weights' and 'subset' are ## not evaluated before being passed to subset(). Argument ## matching is as follows: ## ## Argument of cm() Argument of subset() ## ================ ==================== ## data x ## ratios select ## weights select ## subset subset ## ## Positions of the arguments that will be needed. m <- match(c("data", "ratios", "weights", "subset"), names(Call), 0) ## Extraction of the portfolio structure. Arguments 'data' and ## 'subset' are passed to subset(). 
cl <- Call[c(1, m[c(1, 4)])] # use data and subset only cl[[1]] <- as.name("subset") # change function name names(cl)[2] <- "x" # argument matching cl$select <- level.names # add argument 'select' levs <- eval(cl, parent.frame()) # extraction ## Object 'levs' is a data frame or matrix with as many colums as ## there are levels in the model (still notwithstanding the data ## level). Rows contain nodes identifiers which can be ## anything. For calculations, these identifiers are converted ## into simple subscripts (i, j, k, ...) as used in mathematical ## notation. ## ## Note that 'apply' will coerce to a matrix. ilevs <- apply(levs, 2, function(x) as.integer(factor(x))) ## Extraction of the ratios. If argument 'ratios' is missing, then ## use all columns of 'data' except those of the portfolio ## structure. cl$select <- if (missing(ratios)) setdiff(colnames(data), level.names) else Call[[m[2]]] ratios <- as.matrix(eval(cl, parent.frame())) # ratios as matrix ## Creation of a weight matrix. Extract from data if argument ## 'weights' is specified, otherwise create a matrix of ones. For ## extraction, the only change from ratio extraction is the ## content of element "select" of the call. weights <- if (missing(weights)) { if (any(is.na(ratios))) stop("missing ratios not allowed when weights are not supplied") array(1, dim(ratios)) # matrix of ones } else { cl$select <- Call[[m[3]]] as.matrix(eval(cl, parent.frame())) # weights as matrix } ## == DISPATCH TO APPROPRIATE CALCULATION FUNCTION == ## ## Buhlmann-Straub models are handled by bstraub(), regression ## models by hache() and hierarchical models by hierarc(). if (nlevels < 2) # one-dimensional model { ## One-dimensional models accept only "unbiased" and ## "iterative" for argument 'method'. 
method <- match.arg(method) if (method == "Buhlmann-Gisler" || method == "Ohlsson") method <- "unbiased" if (is.null(regformula)) # Buhlmann-Straub { res <- bstraub(ratios, weights, method = method, tol = tol, maxit = maxit, echo = echo) } else # Hachemeister { ## If regression model is actually empty or has only an ## intercept, call bstraub(). trf <- terms(regformula) res <- if (length(attr(trf, "factors")) == 0) { warning("empty regression model; fitting with Buhlmann-Straub's model") bstraub(ratios, weights, method = method, tol = tol, maxit = maxit, echo = echo) } else hache(ratios, weights, regformula, regdata, adj.intercept = adj.intercept, method = method, tol = tol, maxit = maxit, echo = echo) } ## Add missing quantities to results. res$classification <- levs res$ordering <- list(seq_along(levs)) } else # hierarchical model { ## Computations with auxiliary function. res <- hierarc(ratios, weights, classification = ilevs, method = method, tol = tol, maxit = maxit, echo = echo) ## Put back original level names into the object res$classification <- levs } ## Transfer level names to lists names(res$means) <- names(res$weights) <- c("portfolio", level.names) names(res$unbiased) <- if (!is.null(res$unbiased)) names(res$means) names(res$iterative) <- if (!is.null(res$iterative)) names(res$means) names(res$nodes) <- names(res$ordering) <- level.names if (is.list(res$cred)) names(res$cred) <- level.names ## Results class(res) <- c("cm", class(res)) attr(res, "call") <- Call res } predict.cm <- function(object, levels = NULL, newdata, ...) { ## Convert the character 'levels' argument into numeric and pass ## to next method. level.names <- names(object$nodes) levels <- if (is.null(levels)) seq_along(level.names) else pmatch(levels, level.names) if (any(is.na(levels))) stop("invalid level name") NextMethod() } print.cm <- function(x, ...) { chkDots(...) # method does not use '...' 
nlevels <- length(x$nodes) level.names <- names(x$nodes) b <- if (is.null(x$iterative)) x$unbiased else x$iterative cat("Call:\n", paste(deparse(attr(x, "call"), width.cutoff = getOption("deparse.cutoff")), sep = "\n", collapse = "\n"), "\n\n", sep = "") cat("Structure Parameters Estimators\n\n") cat(" Collective premium:", x$means[[1]], "\n") for (i in seq.int(nlevels)) { if (i == 1L) { ## Treat the Hachemeister model separately since in this ## case the variance components vector is a list, with the ## first element a matrix. (Note that since a matrix with ## empty column names is printed to the screen, there will ## be a blank line in the display. Hence the inserted ## newline in the 'else' case.) if (attr(x, "model") == "regression") { m <- b[[1]] dimnames(m) <- list(c(paste(" Between", level.names[i], "variance: "), rep("", nrow(m) - 1)), rep("", ncol(m))) print(m) } else cat("\n Between", level.names[i], "variance:", b[i], "\n") } else cat(" Within ", level.names[i - 1], "/Between ", level.names[i], " variance: ", b[i], "\n", sep = "") } cat(" Within", level.names[nlevels], "variance:", b[[nlevels + 1]], "\n", fill = TRUE) invisible(x) } summary.cm <- function(object, levels = NULL, newdata, ...) { nlevels <- length(object$nodes) if (nlevels == 1L) { ## Single level cases (Buhlmann-Straub and Hachemeister): ## return the object with the following modifications: put ## credibility factors into a list and add a list of the ## credibility premiums. object$premiums <- list(predict(object, newdata = newdata)) object$cred <- list(object$cred) class(object) = c("summary.cm", class(object)) } else { ## Multi-level case (hierarchical): select result of the ## appropriate level(s). 
plevs <- if (is.null(levels)) seq_along(names(object$nodes)) else pmatch(levels, names(object$nodes)) if (any(is.na(plevs))) stop("invalid level name") object$premiums <- predict(object, levels) # new element object$means <- object$means[c(1, plevs + 1)] object$weights <- object$weights[c(1, plevs + 1)] object$unbiased <- object$unbiased[sort(unique(c(plevs, plevs + 1)))] object$iterative <- object$iterative[sort(unique(c(plevs, plevs + 1)))] object$cred <- object$cred[plevs] object$classification <- object$classification[, seq.int(max(plevs)), drop = FALSE] object$nodes <- object$nodes[plevs] class(object) <- c("summary.cm", class(object)) } structure(object, ...) # attach additional attributes in '...' } print.summary.cm <- function(x, ...) { nlevels <- length(x$nodes) level.names <- names(x$nodes) NextMethod() # print.cm() cat("Detailed premiums\n\n") for (i in seq.int(nlevels)) { ## Print a "section title" only if there is more than one ## level. (Provision introduced in v2.3.0; before the title ## was always printed.) if (nlevels > 1L) cat(" Level:", level.names[i], "\n") ## There are no level names in the linear Bayes case, so we ## skip this column in the results. if (is.null(level.names)) levs <- NULL else { level.id <- match(level.names[i], colnames(x$classification)) levs <- x$classification[, seq.int(level.id), drop = FALSE] m <- duplicated(levs) } if (attr(x, "model") == "regression") { ## Hachemeister model: results contain matrices y <- cbind(" ", as.vector(format(x$means[[i + 1L]], ...)), as.vector(apply(format(x$cred[[i]], ...), c(1L, 3L), paste, collapse = " ")), as.vector(format(sapply(x$adj.models, coef), ...)), " ") y[seq(1, nrow(y), dim(x$cred[[i]])[1]), c(1L, 5L)] <- c(levs[!m, , drop = FALSE], format(x$premiums[[i]], ...)) colnames(y) <- c(colnames(levs), "Indiv. coef.", "Cred. matrix", "Adj. coef.", "Cred. 
premium") } else if (is.null(levs)) { ## Linear Bayes model: simplified results with no level ## column y <- cbind(format(x$means[[i + 1L]], ...), format(x$weights[[i + 1L]], ...), format(x$cred[[i]], ...), format(x$premiums[[i]], ...)) colnames(y) <- c("Indiv. mean", "Weight", "Cred. factor", "Bayes premium") } else { ## All other models y <- cbind(as.matrix(levs[!m, , drop = FALSE]), format(x$means[[i + 1L]], ...), format(x$weights[[i + 1L]], ...), format(x$cred[[i]], ...), format(x$premiums[[i]], ...)) colnames(y) <- c(colnames(levs), "Indiv. mean", "Weight", "Cred. factor", "Cred. premium") } rownames(y) <- rep(" ", nrow(y)) print(y, quote = FALSE, right = FALSE, ...) cat("\n") } invisible(x) } actuar/R/elev.R0000644000176200001440000001060115147745722013024 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Sample empirical limited value functions for individual and ### grouped data. ### ### AUTHORS: Vincent Goulet and ### Mathieu Pigeon elev <- function(x, ...) UseMethod("elev") elev.default <- function(x, ...) { chkDots(...) # method does not use '...' Call <- match.call() if (exists(".Generic", inherits = FALSE)) Call[[1]] <- as.name(.Generic) FUN <- function(limit) sapply(limit, function(x, y) mean(pmin(x, y)), x = x) environment(FUN) <- new.env() assign("x", sort(x), envir = environment(FUN)) assign("n", length(unique(x)), envir = environment(FUN)) class(FUN) <- c("elev", class(FUN)) attr(FUN, "call") <- Call attr(FUN, "grouped") <- FALSE FUN } ### Function 'elev.grouped.data' below returns a function that uses ### data stored in its environment. Avoid false positive in R CMD ### check. if (getRversion() >= "2.15.1") utils::globalVariables(c("cj", "nj")) ### This function assumes right-closed intervals, but the numerical ### values are identical for left-closed intervals. elev.grouped.data <- function(x, ...) { chkDots(...) # method does not use '...' 
Call <- match.call() if (exists(".Generic", inherits = FALSE)) Call[[1]] <- as.name(.Generic) FUN <- function(limit) { ## Explicitely get the data from the function environment. ## cj <- eval(expression(cj)) ## nj <- eval(expression(nj)) ## Number of classes. r <- length(nj) ## This is to avoid numerical problems. limit <- pmin(limit, cj[r + 1L]) ## Class in which the limit is located. cl <- findInterval(limit, cj, all.inside = TRUE) ## Means for all classes below each limit. cjt <- head(cj, max(cl)) # upper bounds res1 <- sapply(cl - 1L, function(n, x) drop(crossprod(head(x, n), head(nj, n))), (head(cjt, -1) + tail(cjt, -1))/2) ## Means for classes with each limit. cjt <- cj[cl] # lower bounds njt <- nj[cl] # frequencies p <- (limit - cjt) / (cj[cl + 1L] - cjt) # prop. to take res2 <- njt * p * (cjt + limit)/2 + njt * (1 - p) * limit ## Means for classes above each limit. res3 <- limit * sapply(r - cl, function(n, x) sum(tail(x, n)), tail(nj, -min(cl))) ## Total (res1 + res2 + res3)/sum(nj) } environment(FUN) <- new.env() assign("cj", eval(expression(cj), envir = environment(x)), envir = environment(FUN)) assign("nj", x[, 2L], envir = environment(FUN)) assign("n", nrow(x), envir = environment(FUN)) class(FUN) <- c("elev", class(FUN)) attr(FUN, "call") <- Call attr(FUN, "grouped") <- TRUE FUN } ### Essentially identical to stats::print.ecdf(). print.elev <- function(x, digits = getOption("digits") - 2, ...) { ## Utility function numform <- function(x) paste(formatC(x, digits = digits), collapse = ", ") ## The rest is adapted from ecdf() varname <- if (attr(x, "grouped")) "cj" else "x" cat("Empirical LEV \nCall: ") print(attr(x, "call"), ...) 
n <- length(xx <- eval(parse(text = varname), envir = environment(x))) i1 <- 1L:min(3L, n) i2 <- if (n >= 4L) max(4L, n - 1L):n else integer(0) cat(" ", varname, "[1:", n, "] = ", numform(xx[i1]), if (n > 3L) ", ", if (n > 5L) " ..., ", numform(xx[i2]), "\n", sep = "") invisible(x) } ### Essentially identical to stats::summary.ecdf(). summary.elev <- function (object, ...) { cat("Empirical LEV:\t ", eval(expression(n), envir = environment(object)), "unique values with summary\n") summary(knots(object), ...) } ### Essentially identical to stats::knots.stepfun(). knots.elev <- function(Fn, ...) { if (attr(Fn, "grouped")) eval(expression(cj), envir = environment(Fn)) else eval(expression(x), envir = environment(Fn)) } plot.elev <- function(x, ..., main = NULL, xlab = "x", ylab = "Empirical LEV") { if (missing(main)) main <- { cl <- attr(x, "call") deparse(if (!is.null(cl)) cl else sys.call()) } kn <- knots(x) Fn <- x(kn) plot(kn, Fn, ..., main = main, xlab = xlab, ylab = ylab) } actuar/R/ZeroTruncatedPoisson.R0000644000176200001440000000161515147745722016242 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}ztpois functions to compute ### characteristics of the Zero Truncated Poisson distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHOR: Vincent Goulet dztpois <- function (x, lambda, log = FALSE) .External(C_actuar_do_dpq, "dztpois", x, lambda, log) pztpois <- function(q, lambda, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pztpois", q, lambda, lower.tail, log.p) qztpois <- function(p, lambda, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qztpois", p, lambda, lower.tail, log.p) rztpois <- function(n, lambda) .External(C_actuar_do_random, "rztpois", n, lambda) ## not exported; for internal use in panjer() pgfztpois <- function(x, lambda) expm1(lambda * x)/expm1(lambda) actuar/R/InverseWeibull.R0000644000176200001440000000315615147745722015037 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}invweibull functions to compute ### characteristics of the Inverse Weibull distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = exp(-(x/scale)^shape), x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dinvweibull <- function (x, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dinvweibull", x, shape, scale, log) pinvweibull <- function(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvweibull", q, shape, scale, lower.tail, log.p) qinvweibull <- function(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qinvweibull", p, shape, scale, lower.tail, log.p) rinvweibull <- function(n, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rinvweibull", n, shape, scale) minvweibull <- function(order, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "minvweibull", order, shape, scale, FALSE) levinvweibull <- function(limit, shape, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levinvweibull", limit, shape, scale, order, FALSE) ## Aliases dlgompertz <- dinvweibull plgompertz <- pinvweibull qlgompertz <- qinvweibull rlgompertz <- rinvweibull mlgompertz <- minvweibull levlgompertz <- levinvweibull actuar/R/Pareto.R0000644000176200001440000000232415147745722013326 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}pareto functions to compute ### characteristics of the Pareto distribution. The version used in ### these functions has cumulative distribution function ### ### Pr[X <= x] = 1 - (scale/(x + scale))^shape, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dpareto <- function (x, shape, scale, log = FALSE) .External(C_actuar_do_dpq, "dpareto", x, shape, scale, log) ppareto <- function (q, shape, scale, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "ppareto", q, shape, scale, lower.tail, log.p) qpareto <- function (p, shape, scale, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qpareto", p, shape, scale, lower.tail, log.p) rpareto <- function(n, shape, scale) .External(C_actuar_do_random, "rpareto", n, shape, scale) mpareto <- function(order, shape, scale) .External(C_actuar_do_dpq, "mpareto", order, shape, scale, FALSE) levpareto <- function(limit, shape, scale, order = 1) .External(C_actuar_do_dpq, "levpareto", limit, shape, scale, order, FALSE) actuar/R/coverage.R0000644000176200001440000003217415147745722013675 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Create modified density and modified cumulative distribution ### function for data with deductible, limit, coinsurance and ### inflation. ### ### See Chapter 8 of Klugman, Panjer & Willmot, Loss Models, Fourth ### Edition, Wiley, 2012. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet ## TODO (2025-03-18): when 'coverage' is called with variables, say ## 'foo' and 'bar', holding the names of the pdf and cdf, the function ## returned contains calls to 'foo' and 'bar' instead of the names ## actually therein. To solve this, replace Call$[pc]df by ## eval(Call$[pc]df). ## ## TODO (2025-03-18): furthermore, the underlying pdf and cdf should ## be put in the environment of the returned function, such that its ## evaluation remains possible even if the pdf and cdf are not in the ## current environment. Perhaps only one of these two changes is ## necessary. ## ## TODO (2025-03-19): in the limit case where there is no deductible ## and no limit, the values at 0 (for densities with the mode at 0) ## and Inf (least likely) may be wrong. 
coverage <- function(pdf, cdf, deductible = 0, franchise = FALSE, limit = Inf, coinsurance = 1, inflation = 0, per.loss = FALSE) { Call <- match.call() ## First determine if the cdf is needed or not. It is needed when ## there is a deductible or a limit and, of course, if the output ## function should compute the cdf. is.cdf <- missing(pdf) || is.null(pdf) # return cdf? has.limit <- limit < Inf # used often needs.cdf <- any(deductible > 0, has.limit, is.cdf) # cdf needed? ## Sanity check of arguments if (any(deductible < 0, limit < 0, coinsurance < 0, inflation < 0)) stop("coverage modifications must be positive") if (limit <= deductible) stop("deductible must be smaller than the limit") if (coinsurance > 1) stop("coinsurance must be between 0 and 1") if (missing(cdf) & needs.cdf) stop(sprintf("%s must be supplied", sQuote("cdf"))) ## Quantites often used. Leave as expressions for the output ## function to preserve accuracy. r <- 1 + inflation d <- if (inflation) substitute(d/r, list(d = deductible, r = r)) else deductible u <- if (inflation) substitute(u/r, list(u = limit, r = r)) else limit ## The modified cdf or pdf are usually defined in branches. To ## avoid using nested ifelse(), we will rather rely on sets of ## expressions to make the required calculations for each branch ## separately. This is actually much faster. ## ## The output function will have varying number of said ## expressions depending on the case that is dealt with. We will ## build the body of the output function piece by piece as we go ## along. e <- expression(Call <- match.call()) ## One main discriminating factor is whether the cdf is needed for ## the output function of not. if (needs.cdf) { ## Get argument list of 'cdf' to transfert them to the output ## function. argv <- formals(cdf) # arguments as list argn <- names(argv) # arguments names as strings ## Remember if argument 'lower.tail' is available, so we can ## use it later. 
Then, drop unsupported arguments 'lower.tail' ## and 'log.p'. has.lower <- "lower.tail" %in% argn argn <- setdiff(argn, c("lower.tail", "log.p")) ## Calculations in the output function are done by evaluating ## function calls built upon the call to the output function ## itself. This is convenient as we do not need to fiddle with ## the values of the formal arguments. if (is.cdf) # output function computes F(y) { ## The output function will have the same formal arguments ## as the cdf. Object 'x' holds the symbol of the first ## argument. argsFUN <- argv[argn] # arguments of output function x <- as.name(argn[1]) # symbol ## Calls needed in this case: ## 1. one to compute F(y); ## 2. one to compute F(d) if there is a deductible; ## 3. one to compute 1 - F(d) for the per payment cases. ## Never need to compute F(u). ## ## If 'lower.tail' is available in 'cdf', then set it to ## FALSE to compute S(d) = 1 - F(d) more accurately. e <- c(e, quote(F <- Call), # 1. substitute(F[[1L]] <- as.name(fun), list(fun = as.character(Call$cdf)))) if (deductible) { e <- c(e, quote(Fd <- F), # 2. substitute(Fd[[2L]] <- a, list(a = d))) if (!per.loss & has.lower) e <- c(e, quote(Sd <- Fd), # 3. quote(Sd$lower.tail <- FALSE)) } } else # output function computes f(y) { ## When there is a limit, we will need to compute 1 - F(u) ## as is or using 'lower.tail = FALSE' to improve ## accuracy. For clarity in the output function, we will ## use Fu as object name in the first case and Su in the ## second. if (has.limit) { if (has.lower) { Fu.name <- as.name("Su") Su.quote <- quote(eval.parent(Su)) } else { Fu.name <- as.name("Fu") Su.quote <- quote((1 - eval.parent(Fu))) } } ## Calls needed in this case: ## 1. one to compute F(d) if there is a deductible for the ## per loss cases; ## 2. one to compute 1 - F(d) if there is a deductible for the ## per payment cases; ## 3. one to compute 1 - F(u) when there is a limit. ## No function to compute F(y) needed. 
## ## If 'lower.tail' is available in 'cdf', then set it to ## FALSE to compute S(d) = 1 - F(d) and S(u) = 1 - F(u) ## more accurately. if (deductible) # f(y) with deductible { Fd.name <- as.name(if (!per.loss & has.lower) "Sd" else "Fd") e <- c(e, substitute(G <- Call, list(G = Fd.name)), # 1. or 2. if (!per.loss & has.lower) quote(Sd$lower.tail <- FALSE), substitute(G[[1L]] <- as.name(fun), list(G = Fd.name, fun = as.character(Call$cdf))), substitute(names(G)[2L] <- q, list(G = Fd.name, q = argn[1])), substitute(G[[2L]] <- a, list(G = Fd.name, a = d))) if (has.limit) e <- c(e, substitute(H <- G, list(H = Fu.name, G = Fd.name)), # 3. if (per.loss & has.lower) quote(Su$lower.tail <- FALSE), substitute(H[[2L]] <- a, list(H = Fu.name, a = u))) } else # f(y) with limit only { ## Since 'needs.cdf == TRUE', then this case ## necessarily has 'limit < Inf'. Only call needed is ## one to compute 1 - F(u). e <- c(e, substitute(G <- Call, list(G = Fu.name)), if (has.lower) quote(Su$lower.tail <- FALSE), substitute(G[[1L]] <- as.name(fun), list(G = Fu.name, fun = as.character(Call$cdf))), substitute(names(G)[2L] <- q, list(G = Fu.name, q = argn[1])), substitute(G[[2L]] <- a, list(G = Fu.name, a = u))) } } } ## Repeat same steps as above for case needing the pdf. The output ## function is a pdf and in this case the arguments of the output ## function are those of 'pdf'. if (!is.cdf) { argv <- formals(pdf) # arguments as list argn <- setdiff(names(argv), "log") # drop argument 'log' argsFUN <- argv[argn] # arguments of output function x <- as.name(argn[1]) # symbol e <- c(e, quote(f <- Call), substitute(f[[1L]] <- as.name(fun), list(fun = as.character(Call$pdf)))) } ## Build the value at which the underlying pdf/cdf will be called ## for non special case values of 'x'. We need to index 'x' to ## only compute for the correct values of a given branch. 
x.mod <- as.call(c(as.name("["), x, as.name("w"))) if (coinsurance < 1) x.mod <- substitute(x/alpha, list(x = x.mod, alpha = coinsurance)) if (deductible & !franchise) x.mod <- substitute(x + d, list(x = x.mod, d = deductible)) if (inflation) x.mod <- substitute((x)/r, list(x = x.mod, r = r)) ## Each pdf/cdf is defined in three branches. Define the ## boundaries and conditions for the first two branches. Those for ## the third branch are defined further down. if (franchise) { bound1 <- coinsurance * deductible bound2 <- coinsurance * limit cond1 <- if (is.cdf) substitute(0 <= x & x <= b1, list(x = x, b1 = bound1)) else quote(x == 0) cond2 <- substitute(b1 < x & x < b2, list(x = x, b1 = bound1, b2 = bound2)) } else { bound1 <- 0 bound2 <- coinsurance * (limit - deductible) cond1 <- substitute(x == 0, list(x = x)) cond2 <- substitute(0 < x & x < b, list(x = x, b = bound2)) } ## Initialization of the results vector in the output function ## with 0s. e <- c(e, substitute(res <- numeric(length(x)), list(x = x))) ## Definition of the output function for the first branch. There ## is a computation to make only if there is a deductible with the ## payment per loss random variable. For all other cases, the ## value in the first branch is 0 and we rely on the ## initialization with numeric() done at the previous step. if (per.loss & deductible) e <- c(e, substitute(res[which(cond1)] <- eval.parent(Fd), list(cond1 = cond1))) ## Definition of the output function for the second and third ## branches. The 'is.cdf = TRUE' and 'is.cdf = FALSE' cases must ## be treated separately. 
if (is.cdf) { cond3 <- substitute(x >= b, list(x = x, b = bound2)) f2 <- quote(eval.parent(F)) if (!per.loss & deductible) f2 <- if (has.lower) substitute((f - F)/S, list(f = f2, F = quote(eval.parent(Fd)), S = quote(eval.parent(Sd)))) else substitute((f - F)/S, list(f = f2, F = quote((p <- eval.parent(Fd))), S = quote((1 - p)))) e <- c(e, substitute(w <- which(cond), list(cond = cond2)), substitute(F[[2L]] <- x, list(x = x.mod)), substitute(res[w] <- f, list(f = f2)), if (has.limit) substitute(res[cond] <- 1, list(cond = cond3))) } else { cond3 <- substitute(x == b, list(x = x, b = bound2)) f2 <- quote(eval.parent(f)) if (has.limit) f3 <- Su.quote if (!per.loss & deductible) { if (has.limit) { f2 <- if (has.lower) substitute(f/(p <- S), list(f = f2, S = quote(eval.parent(Sd)))) else substitute(f/(p <- S), list(f = f2, S = quote(1 - eval.parent(Fd)))) f3 <- substitute(f/p, list(f = f3)) } else f2 <- if (has.lower) substitute(f/S, list(f = f2, S = quote(eval.parent(Sd)))) else substitute(f/S, list(f = f2, S = quote((1 - eval.parent(Fd))))) } if (inflation | coinsurance < 1) f2 <- substitute(f/k, list(f = f2, k = coinsurance * r)) e <- c(e, substitute(w <- which(cond), list(cond = cond2)), substitute(f[[2L]] <- x, list(x = x.mod)), substitute(res[w] <- f, list(f = f2)), if (has.limit) substitute(res[cond] <- f, list(cond = cond3, f = f3))) } ## Last expression of the output function. e <- c(e, quote(res)) ## Wrap up the output function. FUN <- function() {} body(FUN) <- as.call(c(as.name("{"), e)) # taken from help(body) formals(FUN) <- argsFUN # set arguments environment(FUN) <- new.env() # new, empty environment FUN } actuar/R/emm.R0000644000176200001440000000223015147745722012646 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Empirical moments for individual and grouped data. ### ### See Klugman, Panjer & Willmot, Loss Models, Wiley, 1998. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet emm <- function(x, order = 1, ...) 
UseMethod("emm") emm.default <- function(x, order = 1, ...) { if (any(order < 0)) stop(sprintf("%s must be positive", sQuote("order"))) colMeans(outer(x, order, "^"), ...) } emm.grouped.data <- function(x, order = 1, ...) { ## Function does not work for negative moments if (any(order < 0)) stop(sprintf("%s must be positive", sQuote("order"))) ## Extract group boundaries cj <- eval(expression(cj), envir = environment(x)) ## Compute the factor ## ## f_j = (c_j^{k + 1} - c_{j-1}^{k+1})/((k+1) * (c_j - c_{j-1})) ## ## for all values of 'j' and 'k' == 'order'. y <- diff(outer(cj, order + 1, "^")) / outer(diff(cj), order + 1) ## Drop the group boundaries column x <- as.matrix(x[-1L]) ## Compute sum(n_j * f_j)/sum(nj) for all values of 'order'. drop(crossprod(x, y)) / colSums(x, ...) } actuar/R/InverseTransformedGamma.R0000644000176200001440000000336615147745722016666 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}invtrgamma functions to compute ### characteristics of the Inverse Transformed Gamma distribution. The ### version used in these functions has cumulative distribution ### function ### ### Pr[X <= x] = 1 - pgamma((x/scale)^shape2, shape1, scale = 1) ### ### or, equivalently, ### ### Pr[X <= x] = 1 - pgamma(x^shape2, shape1, scale = scale^shape2). ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
###
### AUTHORS: Mathieu Pigeon, Vincent Goulet <vincent.goulet@act.ulaval.ca>

## All functions below are thin wrappers: the actual computations are
## done in the compiled code of the package, reached through the
## .External interface (C_actuar_do_dpq for the density, distribution,
## quantile and moment functions; C_actuar_do_random for random
## generation). 'rate' and 'scale' are reciprocal specifications of
## the same parameter.

## Probability density function.
dinvtrgamma <- function(x, shape1, shape2, rate = 1, scale = 1/rate,
                        log = FALSE)
    .External(C_actuar_do_dpq, "dinvtrgamma", x, shape1, shape2, scale, log)

## Cumulative distribution function.
pinvtrgamma <- function(q, shape1, shape2, rate = 1, scale = 1/rate,
                        lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "pinvtrgamma", q, shape1, shape2, scale,
              lower.tail, log.p)

## Quantile function.
qinvtrgamma <- function(p, shape1, shape2, rate = 1, scale = 1/rate,
                        lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "qinvtrgamma", p, shape1, shape2, scale,
              lower.tail, log.p)

## Random generation.
rinvtrgamma <- function(n, shape1, shape2, rate = 1, scale = 1/rate)
    .External(C_actuar_do_random, "rinvtrgamma", n, shape1, shape2, scale)

## Raw moments E[X^order].
minvtrgamma <- function(order, shape1, shape2, rate = 1, scale = 1/rate)
    .External(C_actuar_do_dpq, "minvtrgamma", order, shape1, shape2, scale,
              FALSE)

## Limited moments E[min(X, limit)^order].
levinvtrgamma <- function(limit, shape1, shape2, rate = 1, scale = 1/rate,
                          order = 1)
    .External(C_actuar_do_dpq, "levinvtrgamma", limit, shape1, shape2, scale,
              order, FALSE)
actuar/R/Gumbel.R0000644000176200001440000000227115147745722013310 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r,m,lev}gumbel functions to compute
### characteristics of the Gumbel distribution. The version used in
### these functions has cumulative distribution function
###
###   Pr[X <= x] = exp(-exp(-(x - alpha)/scale)), -Inf < x < Inf.
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
### ### AUTHOR: Vincent Goulet dgumbel <- function (x, alpha, scale, log = FALSE) .External(C_actuar_do_dpq, "dgumbel", x, alpha, scale, log) pgumbel <- function(q, alpha, scale, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pgumbel", q, alpha, scale, lower.tail, log.p) qgumbel <- function(p, alpha, scale, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qgumbel", p, alpha, scale, lower.tail, log.p) rgumbel <- function(n, alpha, scale) .External(C_actuar_do_random, "rgumbel", n, alpha, scale) mgumbel <- function(order, alpha, scale) .External(C_actuar_do_dpq, "mgumbel", order, alpha, scale, FALSE) mgfgumbel <- function(t, alpha, scale, log = FALSE) .External(C_actuar_do_dpq, "mgfgumbel", t, alpha, scale, log) actuar/R/hierarc.R0000644000176200001440000003213515147745722013514 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Hierarchical credibility model calculations ### ### AUTHORS: Vincent Goulet , ### Louis-Philippe Pouliot, Tommy Ouellet. hierarc <- function(ratios, weights, classification, method = c("Buhlmann-Gisler", "Ohlsson", "iterative"), tol = sqrt(.Machine$double.eps), maxit = 100, echo = FALSE) { ## === HANDS ON THE DATA === ## ## Arguments 'ratios' and 'weights' must be matrices of real ## numbers, whereas 'classification' must be a matrix of integers ## giving the affiliation of each entity in the portfolio. nlevels <- ncol(classification) # number of levels nlevels1p <- nlevels + 1L # frequently used ## To symmetrize further calculations, bind a column of ones ## representing the affiliation to the global portfolio. classification <- cbind(pf = 1L, classification) ## If weights are not specified, use equal weights. if (missing(weights)) { if (any(is.na(ratios))) stop("missing ratios not allowed when weights are not supplied") array(1, dim(ratios)) # matrix of ones } ## Sanity check if weights and ratios correspond. 
if (!identical(which(is.na(ratios)), which(is.na(weights)))) stop("missing values are not in the same positions in 'weights' and in 'ratios'") ## === NUMBER OF NODES AND SPLITTING FACTORS === ## ## Future computation of per level summaries will require a set of ## factors based on the number of nodes in each level. An example ## will best explain what is achieved here: suppose there are two ## sectors; sector 1 has 3 units; sector 2 has 2 units. To make ## per sector summaries, the following factors can be used to ## split the unit data: 1 1 1 2 2. ## ## Generating such factors first requires to know the number of ## nodes at each level in a format identical to the 'nodes' ## argument of simul(). [In the previous example, the number of ## nodes would be 'list(2, c(3, 2))'.] Then, the factors are ## obtained by repeating a sequence the same length as the number ## of nodes at one level [2] according to the number of nodes at ## the level below [c(3, 2)]. ## ## 0. Initialization fnodes <- nnodes <- vector("list", nlevels) ## 1. Calculation of the number of nodes: the main idea is to ## create a unique factor for each node using interaction() ## recursively on the columns of 'classification'. We can do ## something simpler for the lowest level (the entities), since we ## know the combinations of indexes to all be different at this ## level. fx <- vector("list", nlevels1p) fx[[nlevels1p]] <- factor(classification[, nlevels1p]) # entity level for (i in nlevels:1L) { ## Function 'interaction' expects its arguments separately or ## as a list, hence the lapply() below. fx[[i]] <- as.integer(interaction(lapply(seq.int(i), function(j) classification[, j]), drop = TRUE)) ## 'as.vector' below is used to get rid of names nnodes[[i]] <- as.vector(sapply(split(fx[[i + 1]], fx[[i]]), function(x) length(unique(x)))) } ## 2. Generation of the factors. 
Following the rule described ## above, this could simply be ## ## fnodes <- lapply(nnodes, function(x) rep(seq_along(x), x)) ## ## However, this will not work if rows of data are not sorted per ## level. (In the example above, if the data of unit 3 of sector 1 ## is at the bottom of the matrix of data, then the factors need ## to be 1 1 2 2 1.) ## ## The solution is actually simple: converting the entity level ## factors ('fx[[nlevels]]') to integers will assure that any ## summary made using these factors will be sorted. This done, it ## is possible to use the command above for the upper levels. fnodes[[nlevels]] <- as.integer(fx[[nlevels]]) fnodes[-nlevels] <- lapply(nnodes[-nlevels], function(x) rep(seq_along(x), x)) ## === PER ENTITY SUMMARIES === ## ## Individual weighted averages. It could happen that an entity ## has no observations, for example when applying the model on ## claim amounts. In such a situation, put the total weight of the ## entity and the weighted average both equal to zero. That way, ## the premium will be equal to the credibility weighted average, ## as it should, but the entity will otherwise have no ## contribution in the calculations. weights.s <- rowSums(weights, na.rm = TRUE) ratios.w <- ifelse(weights.s > 0, rowSums(weights * ratios, na.rm = TRUE) / weights.s, 0) ## === EFFECTIVE NUMBER OF NODES === ## ## Given the possibility to have whole levels with no data, as ## explained above, it is necessary to count the *effective* ## number of nodes in each level, that is the number of nodes with ## data. This comes this late since it relies on 'weights.s'. ## ## Object 'eff.nnodes' is in every respect equivalent to 'nnodes' ## except that each element of the list is a vector of the number of ## non "empty" nodes for each classification of the level ## above. 
eff.nnodes <- vector("list", nlevels) w <- weights.s for (i in nlevels:1L) { eff.nnodes[[i]] <- tapply(w, fnodes[[i]], function(x) sum(x > 0)) w <- tapply(w, fnodes[[i]], sum) # running totals } ## === DENOMINATORS OF VARIANCE ESTIMATORS === ## ## The denominators for all the variance estimators never ## change. The denominator at one level is equal to the total ## number of nodes at that level minus the total number of nodes ## at the level above. At the lowest level (the denominator of ## s^2), this is ## ## number of (non NA) ratios - (effective) number of entities. ## ## The number of (non missing) ratios is not included in ## 'eff.nnodes'. For the portfolio level, the denominator is ## ## (effective) number of "sectors" - 1 ## ## The 1 neither is included in 'eff.nnodes'. denoms <- diff(c(1L, sapply(eff.nnodes, sum), sum(!is.na(ratios)))) ## Final sanity checks if (any(!denoms)) stop("there must be at least two nodes at every level") if (ncol(ratios) < 2L) stop("there must be at least one node with more than one period of experience") ## === ESTIMATION OF s^2 === s2 <- sum(weights * (ratios - ratios.w)^2, na.rm = TRUE) / denoms[nlevels1p] ## === ESTIMATION OF THE OTHER VARIANCE COMPONENTS === ## ## Create vectors to hold values to be computed at each level ## (from portfolio to entity), namely: the total node weights, the ## node weighted averages, the between variances and the node ## credibility factors. ## ## Only credibility factors are not computed for the portfolio ## level, hence this list is one shorter than the others. tweights <- vector("list", nlevels1p) # total level weights wmeans <- vector("list", nlevels1p) # weighted averages b <- c(numeric(nlevels), s2) # variance estimators cred <- vector("list", nlevels) # credibility factors ## Values already computed at the entity level. 
tweights[[nlevels1p]] <- as.vector(weights.s); wmeans[[nlevels1p]] <- as.vector(ratios.w); ## The unbiased variance estimators are evaluated first as they will ## be used as starting values for the iterative part below. ## ## At the entity level: node weight is given by the natural ## weight, weighted averages use the natural weights. ## ## Level above the entity: node weight is the sum of the natural ## weights at the level below, weighted averages use the natural ## weights. ## ## All upper levels: node weight is the sum of the credibility ## factors at the level below, weighted averages use credibility ## factors from previous level. ## ## Buhlmann-Gisler estimators truncate the per node variance ## estimates to 0 before taking the mean, whereas the Ohlsson ## estimators do not make any truncation. method <- match.arg(method) if (method == "Buhlmann-Gisler") bexp <- expression(b[i] <- mean(pmax(ifelse(ci != 0, bi/ci, 0), 0), na.rm = TRUE)) else # Ohlsson bexp <- expression(b[i] <- sum(bi, na.rm = TRUE) / sum(ci, na.rm = TRUE)) for (i in nlevels:1L) { ## Total weight of the level as per the rule above. tweights[[i]] <- as.vector(tapply(tweights[[i + 1L]], fnodes[[i]], sum)) ## Calculation of the weighted averages of the level. Before ## the between variance is estimated, these use the total ## weights calculated above. wmeans[[i]] <- ifelse(tweights[[i]] > 0, as.vector(tapply(tweights[[i + 1L]] * wmeans[[i + 1L]], fnodes[[i]], sum) / tweights[[i]]), 0) ## Latest non-zero between variance estimate -- the one used ## in the estimator and in the credibility factors. between <- b[b != 0][1L] ## Calculation of the per node variance estimate. bi <- as.vector(tapply(tweights[[i + 1L]] * (wmeans[[i + 1L]] - wmeans[[i]][fnodes[[i]]])^2, fnodes[[i]], sum)) - (eff.nnodes[[i]] - 1) * between ci <- tweights[[i]] - as.vector(tapply(tweights[[i + 1L]]^2, fnodes[[i]], sum)) / tweights[[i]] ## The final estimate is the average of all the per node estimates. 
eval(bexp) ## Calculation of the credibility factors. If these are ## non-zero, the total weights for the current level are ## replaced by the sum of the credibility factors and the ## weighted averages are recomputed with these new weights. #if (max(bu[i], 0)) # don't compute negative factors! if (b[i]) { cred[[i]] <- 1/(1 + between/(b[i] * tweights[[i + 1L]])) tweights[[i]] <- as.vector(tapply(cred[[i]], fnodes[[i]], sum)) wmeans[[i]] <- ifelse(tweights[[i]] > 0, as.vector(tapply(cred[[i]] * wmeans[[i + 1L]], fnodes[[i]], sum) / tweights[[i]]), 0) } else cred[[i]] <- numeric(sum(nnodes[[i]])) } ## Iterative estimation of the structure parameters. ## ## At the entity level: total weight is the sum of the natural ## weights, weighted averages use the natural weights and between ## variance is s^2. ## ## All upper levels: total weight is the sum of the credibility ## factors of the level below, weighted averages use credibility ## factors, between variance estimated recursively and credibility ## factor use total weight of the level, between variance of the ## level below (hence the within variance) and between variance of ## the current level. if (method == "iterative") { b <- pmax(b, 0) # truncation for starting values if (any(head(b, -1L) > 0)) # at least one non-zero starting value .External(C_actuar_do_hierarc, cred, tweights, wmeans, fnodes, denoms, b, tol, maxit, echo) } ## Results structure(list(means = wmeans, weights = tweights, unbiased = if (method != "iterative") b, iterative = if (method == "iterative") b, cred = cred, nodes = nnodes, classification = classification[, -1L], ordering = fnodes), class = "hierarc", model = "hierarchical") } predict.hierarc <- function(object, levels = NULL, newdata, ...) 
{
    ## The credibility premium of a node at some level is
    ##
    ##   p + z * (m - p)
    ##
    ## with 'p' the credibility premium of the level above (the
    ## collective premium at the portfolio level), 'z' the credibility
    ## factor of the node and 'm' the weighted average of the node.
    ##
    ## NOTE(review): argument 'newdata' is accepted but never used by
    ## this method.
    ord <- object$ordering              # level splitting factors
    z <- object$cred                    # credibility factors per level
    m <- object$means                   # weighted averages per level
    nlevels <- length(object$nodes)
    level.names <- names(object$nodes)

    if (is.null(levels))
        levels <- seq_len(nlevels)
    if (any(is.na(levels)) || !is.numeric(levels))
        stop("invalid level number")

    n <- max(levels)
    res <- vector("list", n)

    ## Premiums at the topmost requested level start from the
    ## collective (portfolio) premium.
    res[[1L]] <- m[[1L]] + z[[1L]] * (m[[2L]] - m[[1L]])

    ## Premiums at the lower levels are computed recursively from the
    ## premiums of the level above.
    for (i in seq(2, length.out = n - 1))
    {
        p <- res[[i - 1]][ord[[i]]]
        res[[i]] <- p + z[[i]] * (m[[i + 1]] - p)
    }

    structure(res[levels], names = level.names[levels], ...)
}
actuar/R/ruin.R0000644000176200001440000002634215147745722013052 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Calculation of infinite time ruin probabilities in the models of
### Cramer-Lundberg and Sparre Andersen. In general, one has
###
###   psi(u) = pphtype(u, pi, Q, lower.tail = FALSE)
###
### for definitions of pi and Q that depend on the severity and waiting
### times models. An explicit solution exists when both severity and
### waiting times are exponential (no mixture).
###
### _Combinations_ of exponentials as defined in Dufresne & Gerber
### (1988) are NOT supported.
###
### References:
###
### Dufresne, F. and Gerber, H. U. (1988), "Three methods to calculate
### the probability of ruin", Astin Bulletin 19, p. 71-90.
###
### Asmussen, S. and Rolski, T. (1991), "Computational methods in risk
### theory: A matrix-algorithmic approach", Insurance: Mathematics and
### Economics 10, p. 259-274.
### ### AUTHORS: Christophe Dutang, Vincent Goulet ruin <- function(claims = c("exponential", "Erlang", "phase-type"), par.claims, wait = c("exponential", "Erlang", "phase-type"), par.wait, premium.rate = 1, tol = sqrt(.Machine$double.eps), maxit = 200L, echo = FALSE) { ## Sanity checks if (missing(par.claims) || !is.list(par.claims)) stop(sprintf("%s must be a named list", sQuote("par.claims"))) if (missing(par.wait) || !is.list(par.wait)) stop(sprintf("%s must be a named list", sQuote("par.wait"))) claims <- match.arg(claims) wait <- match.arg(wait) ## ============================================== ## Extraction of the claims severity parameters choices <- switch(claims, exponential = c("rate", "weights"), Erlang = c("shape", "rate", "scale", "weights"), "phase-type" = c("prob", "rates")) i <- pmatch(names(par.claims), choices, nomatch = 0L, duplicates.ok = TRUE) if (all(i == 0L)) stop(sprintf("parameters %s missing in %s", paste(dQuote(choices), collapse = ", "), sQuote("par.claims"))) par.claims <- par.claims[i > 0L] # keep relevant components p <- choices[i[i > 0L]] # keep relevant names names(par.claims) <- p # use full names if (claims == "exponential") { if ("rate" %in% p) rate <- par.claims$rate else stop(sprintf("parameter %s missing in %s", dQuote("rate"), sQuote("par.claims"))) n <- length(rate) if ("weights" %in% p && n > 1L) prob <- rep_len(par.claims$weights, n) else if (n == 1L) prob <- 1 else stop(sprintf("parameter %s missing in %s", dQuote("weights"), sQuote("par.claims"))) rates <- diag(-rate, n) } else if (claims == "Erlang") { if ("shape" %in% p) shape <- par.claims$shape else stop(sprintf("parameter %s missing in %s", dQuote("shape"), sQuote("par.claims"))) if ("rate" %in% p) rate <- par.claims$rate else if ("scale" %in% p) rate <- 1/par.claims$scale else stop(sprintf("parameter %s or %s missing in %s", dQuote("rate"), dQuote("scale"), sQuote("par.claims"))) if (length(shape) < length(rate)) shape <- rep_len(shape, length(rate)) else rate <- 
rep_len(rate, length(shape)) n <- sum(shape) if ("weights" %in% p && length(shape) > 1L) { prob <- numeric(n) prob[cumsum(c(1, head(shape, -1)))] <- par.claims$weights } else if (length(shape) == 1L) prob <- c(1, rep.int(0, n - 1)) else stop(sprintf("parameter %s missing in %s", dQuote("weights"), sQuote("par.claims"))) rates <- diag(rep.int(-rate, shape), n) if (n > 1 && any(shape > 1)) { tmp <- -head(diag(rates), -1L) tmp[cumsum(head(shape, -1L))] <- 0 # insert 0s in "ll corners" rates[cbind(seq_len(n - 1), seq(2, len = n - 1))] <- tmp } } else # claims == "phase-type" { if ("prob" %in% p) prob <- par.claims$prob else stop(sprintf("parameter %s missing in %s", dQuote("prob"), sQuote("par.claims"))) if ("rates" %in% p) rates <- par.claims$rates else stop(sprintf("parameter %s missing in %s", dQuote("rates"), sQuote("par.claims"))) n <- length(prob) if (!(is.matrix(rates) && nrow(rates) == n)) stop(sprintf("invalid parameters in %s", sQuote("par.claims"))) } ## ============================================== ## ================================================= ## Extraction of the interarrival times parameters choices <- switch(wait, exponential = c("rate", "weights"), Erlang = c("shape", "rate", "scale", "weights"), "phase-type" = c("prob", "rates")) i <- pmatch(names(par.wait), choices, nomatch = 0L, duplicates.ok = TRUE) if (all(i == 0L)) stop(sprintf("parameters %s missing in %s", paste(dQuote(choices), collapse = ", "), sQuote("par.wait"))) par.wait <- par.wait[i > 0L] # keep relevant components p <- choices[i[i > 0L]] # keep relevant names names(par.wait) <- p # use full names if (wait == "exponential") { if ("rate" %in% p) rate <- par.wait$rate else stop(sprintf("parameter %s missing in %s", dQuote("rate"), sQuote("par.wait"))) m <- length(rate) if ("weights" %in% p && m > 1L) prob.w <- rep_len(par.wait$weights, m) else if (m == 1L) prob.w <- 1 else stop(sprintf("parameter %s missing in %s", dQuote("weights"), sQuote("par.wait"))) rates.w <- diag(-rate, m) } 
else if (wait == "Erlang") { if ("shape" %in% p) shape <- par.wait$shape else stop(sprintf("parameter %s missing in %s", dQuote("shape"), sQuote("par.wait"))) if ("rate" %in% p) rate <- par.wait$rate else if ("scale" %in% p) rate <- 1/par.wait$scale else stop(sprintf("parameter %s or %s missing in %s", dQuote("rate"), dQuote("scale"), sQuote("par.wait"))) if (length(shape) < length(rate)) shape <- rep_len(shape, length(rate)) else rate <- rep_len(rate, length(shape)) m <- sum(shape) if ("weights" %in% p && length(shape) > 1L) { prob.w <- numeric(sum(shape)) prob.w[cumsum(c(1, head(shape, -1L)))] <- par.wait$weights } else if (length(shape) == 1L) prob.w <- c(1, rep.int(0, m - 1)) else stop(sprintf("parameter %s missing in %s", dQuote("weights"), sQuote("par.wait"))) rates.w <- diag(rep.int(-rate, shape), m) if (m > 1 && any(shape > 1)) { tmp <- -head(diag(rates.w), -1L) tmp[cumsum(head(shape, -1L))] <- 0 # insert 0s in "ll corners" rates.w[cbind(seq_len(m - 1), seq(2, len = m - 1))] <- tmp } } else # wait == "phase-type" { if ("prob" %in% p) prob.w <- par.wait$prob else stop(sprintf("parameter %s missing in %s", dQuote("prob"), sQuote("par.wait"))) if ("rates" %in% p) rates.w <- par.wait$rates else stop(sprintf("parameter %s missing in %s", dQuote("rates"), sQuote("par.wait"))) m <- length(prob.w) if (!(is.matrix(rates.w) && nrow(rates.w) == m)) stop(sprintf("invalid parameters in %s", sQuote("par.wait"))) } ## ================================================= ## Empty definition of the output function. The body is set later. 
FUN <- function(u, survival = FALSE, lower.tail = !survival) {} ## Cramer-Lundberg model if (wait == "exponential" && m == 1L) { ## Special case with an explicit solution if (claims == "exponential" && n == 1L) { lambda <- -drop(rates.w)/premium.rate body(FUN) <- substitute({res <- a * exp(-(b) * u); if (lower.tail) res else 0.5 - res + 0.5}, list(a = -lambda/drop(rates), b = -drop(rates) - lambda)) environment(FUN) <- new.env() # new, empty environment class(FUN) <- c("ruin", class(FUN)) return(FUN) } ## Use phase-type representation for all other claim severity models. pi <- drop(rates.w) * prob %*% solve(rates)/premium.rate Q <- rates - rowSums(rates) %*% pi } ## Sparre Andersen model (interarrival times other than single exponential) else { ## Matrix Q is a "fixed point" of some function (Asmussen & ## Rolski, 1992, p. 265-266). Many elements of this function ## never change, hence they are computed once and for all ## here. In <- diag(n) # n x n identity matrix Im <- diag(m) # m x m identity matrix t0pi <- -rowSums(rates) %o% prob # "multiple" of A(Q) A <- In %x% rbind(prob.w) # first term of A(Q) B <- In %x% rates.w # rhs of the Kronecker sum C <- In %x% -rowSums(rates.w) # third term of A(Q) if (echo) { cat("Iteration\tMatrix Q (column major order)\n") exp <- expression(cat(" ", count, "\t\t ", Q1 <- Q, fill = TRUE)) } else exp <- expression(Q1 <- Q) Q <- rates count <- 0L repeat { eval(exp) if (maxit < (count <- count + 1L)) { warning("maximum number of iterations reached before obtaining convergence") break } Q1 <- Q Q <- rates - t0pi %*% A %*% solve(Q %x% Im + B, C) if (max(rowSums(abs(Q - Q1))) < tol) break } pi <- colSums(Q - rates)/(-sum(rates) * premium.rate) } ## Compute the probability of ruin using the cdf of a phase-type ## distribution with parameters pi and Q. 
body(FUN) <- substitute(pphtype(u, a, b, lower.tail = !lower.tail), list(a = pi, b = Q)) environment(FUN) <- new.env() # new, empty environment class(FUN) <- c("ruin", class(FUN)) FUN } plot.ruin <- function(x, from = NULL, to = NULL, add = FALSE, xlab = "u", ylab = expression(psi(u)), main = "Probability of Ruin", xlim = NULL, ...) curve(x, from = from, to = to, add = add, xlab = xlab, ylab = ylab, main = main, xlim = xlim, ...) actuar/R/ZeroModifiedNegativeBinomial.R0000644000176200001440000000157015147745722017614 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}zmnbinom functions to compute ### characteristics of the Zero Modified Negative Binomial ### distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHOR: Vincent Goulet dzmnbinom <- function (x, size, prob, p0, log = FALSE) .External(C_actuar_do_dpq, "dzmnbinom", x, size, prob, p0, log) pzmnbinom <- function(q, size, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pzmnbinom", q, size, prob, p0, lower.tail, log.p) qzmnbinom <- function(p, size, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qzmnbinom", p, size, prob, p0, lower.tail, log.p) rzmnbinom <- function(n, size, prob, p0) .External(C_actuar_do_random, "rzmnbinom", n, size, prob, p0) actuar/R/unroll.R0000644000176200001440000000241115147745722013404 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Display all values of a matrix of vectors by 'unrolling' the ### object vertically or horizontally. 
###
### AUTHORS: Louis-Philippe Pouliot,
### Vincent Goulet <vincent.goulet@act.ulaval.ca>

## Unroll a matrix of vectors into a plain matrix by expanding the
## vectors either horizontally (by row; the default) or vertically (by
## column), padding shorter rows or columns with NA.
##
## Arguments:
##   x     a vector or a matrix whose cells may hold vectors
##   bycol expand by column instead of by row?
##   drop  delete dimensions of extent one in the result?
unroll <- function(x, bycol = FALSE, drop = TRUE)
{
    dx <- dim(x)
    if (length(dx) > 2L)
        stop(sprintf("%s must be a vector or a matrix", sQuote("x")))

    ## A plain vector is handled as a one row matrix.
    if (length(dx) < 2L)
        x <- rbind(x, deparse.level = 0L)

    ## Number of elements held in each cell; empty (NA) cells stay NA.
    nelem <- function(x) if (identical(x, NA)) NA else length(x)
    frequencies <- array(sapply(x, nelem), dim = dim(x))

    if (bycol)
    {
        ## The result has as many rows as the longest column of 'x'.
        lengths <- colSums(frequencies, na.rm = TRUE)
        mat <- matrix(NA, max(lengths), ncol(x), dimnames = dimnames(x))
        for (j in seq_len(ncol(x)))
            if (0L < (len <- lengths[j]))
                mat[seq_len(len), j] <- unlist(x[!is.na(x[, j]), j])
    }
    else
    {
        ## The result has as many columns as the longest row of 'x'.
        lengths <- rowSums(frequencies, na.rm = TRUE)
        mat <- matrix(NA, nrow(x), max(lengths),
                      dimnames = list(rownames(x), NULL))
        for (j in seq_len(nrow(x)))
            if (0L < (len <- lengths[j]))
                mat[j, seq_len(len)] <- unlist(x[j, !is.na(x[j, ])])
    }
    mat[, , drop = drop]
}
actuar/R/WeibullMoments.R0000644000176200001440000000114415147745722015041 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {m,lev}weibull functions to compute raw and
### limited moments for the Weibull distribution (as defined in R).
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
###
### AUTHORS: Mathieu Pigeon, Vincent Goulet <vincent.goulet@act.ulaval.ca>

## Raw moments E[X^order]; computed in the compiled code of the package.
mweibull <- function(order, shape, scale = 1)
    .External(C_actuar_do_dpq, "mweibull", order, shape, scale, FALSE)

## Limited moments E[min(X, limit)^order]; computed in the compiled code.
levweibull <- function(limit, shape, scale = 1, order = 1)
    .External(C_actuar_do_dpq, "levweibull", limit, shape, scale, order, FALSE)
actuar/R/rcompound.R0000644000176200001440000001053115147745722014101 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Simulation of standard, non hierarchical, compound models. Uses a
### simplified version of the syntax of 'rcomphierarc' for model
### specification.
###
### Where 'rcomphierarc' was developed for flexibility, the functions
### therein aim at execution speed. Various algorithms were tested.
### Only minimal argument validity checks are performed.
###
### AUTHOR: Vincent Goulet <vincent.goulet@act.ulaval.ca>

## Simulate variates from a compound distribution S = X_1 + ... + X_N
## where the frequency N and the severities X_i are described by the
## (unevaluated) model expressions 'model.freq' and 'model.sev'.
rcompound <- function(n, model.freq, model.sev, SIMPLIFY = TRUE)
{
    ## Validity checks.
    if (any(is.na(n)) || any(n < 0))
        stop(sprintf("invalid first argument %s", sQuote("n")))

    ## Convert model expressions into language objects.
    cl.freq <- substitute(model.freq)
    cl.sev <- substitute(model.sev)

    ## If a model expression was actually an object containing the
    ## model, we need to evaluate the object to retrieve the model.
    ## If the resulting object is an expression object, its first
    ## element is the language object we are after.
    if (is.name(cl.freq))
    {
        cl.freq <- eval.parent(cl.freq)
        if (is.expression(cl.freq))
            cl.freq <- cl.freq[[1L]]
    }
    if (is.name(cl.sev))
    {
        cl.sev <- eval.parent(cl.sev)
        if (is.expression(cl.sev))
            cl.sev <- cl.sev[[1L]]
    }

    ## If a model expression is wrapped into 'expression' (as in
    ## 'rcomphierarc'), get rid of the call.
    if (cl.freq[[1L]] == "expression")
        cl.freq <- cl.freq[[-1L]]
    if (cl.sev[[1L]] == "expression")
        cl.sev <- cl.sev[[-1L]]

    ## Initialize the output vector. We will use the fact that 'res'
    ## is filled with zeros later.
    res <- numeric(n)

    ## Add the number of variates to the 'model.freq' call.
    cl.freq$n <- n

    ## Generate frequencies.
    N <- eval.parent(cl.freq)

    ## Add the number of variates to the 'model.sev' call.
    cl.sev$n <- sum(N)

    ## Generate all severities.
    x <- eval.parent(cl.sev)

    ## Create a vector that will be used as a factor to regroup
    ## severities for the computation of aggregate values. Idea:
    ## assign one integer to each frequency and repeat that integer a
    ## number of times equal to the frequency. For example, if the
    ## frequencies are (2, 0, 1, 3), then the vector will be (1, 1, 3,
    ## 4, 4, 4).
    f <- rep.int(seq_len(n), N)

    ## Compute aggregate values and put them in the appropriate
    ## positions in the output vector. The positions corresponding to
    ## zero frequencies are already initialized with zeros.
res[which(N != 0)] <- tapply(x, f, sum) if (SIMPLIFY) res else list(aggregate = res, frequency = N, severity = x) } rcomppois <- function(n, lambda, model.sev, SIMPLIFY = TRUE) { ## Validity checks. if (any(is.na(n)) || any(n < 0)) stop(sprintf("invalid first argument %s", sQuote("n"))) if (any(lambda < 0)) stop(sprintf("invalid values in %s", sQuote("lambda"))) ## Convert model expression into language object. cl.sev <- substitute(model.sev) ## If the model expression was actually an object containing the ## model, we need to evaluate the object to retrieve the model. ## If the resulting object is an expression object, its first ## element is the language object we are after. if (is.name(cl.sev)) { cl.sev <- eval.parent(cl.sev) if (is.expression(cl.sev)) cl.sev <- cl.sev[[1L]] } ## Get rid of the eventual 'expression' call in the language ## object. if (cl.sev[[1L]] == "expression") cl.sev <- cl.sev[[-1L]] ## Initialize the output vector. res <- numeric(n) ## Generate frequencies from Poisson distribution. N <- rpois(n, lambda) ## Add the number of variates to the 'model.sev' call. cl.sev$n <- sum(N) ## Generate all severities. x <- eval.parent(cl.sev) ## Create a vector that will be used as a factor to regroup ## severities for the computation of aggregate values. (See ## comments in 'rcompound' for details.) f <- rep.int(seq_len(n), N) ## Compute aggregate values and put them in the appropriate ## positions in the output vector. 
res[which(N != 0)] <- tapply(x, f, sum) if (SIMPLIFY) res else list(aggregate = res, frequency = N, severity = x) } actuar/R/PhaseType.R0000644000176200001440000000230215147745722013772 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,r,m,mgf}ph functions to compute ### characteristics of Phase-type distributions with cumulative ### distribution function ### ### Pr[X <= x] = 1 - pi %*% exp(Tx) %*% e, ### ### where 'pi' is the initial probability vector, 'T' is the ### subintensity matrix and 'e' is 1-vector of R^m. ### ### See Bladt, M. (2005), "A review on phase-type distributions and ### their use in risk theory", Astin Bulletin 35, p. 145-161. ### ### AUTHORS: Christophe Dutang, Vincent Goulet dphtype <- function(x, prob, rates, log = FALSE) .External(C_actuar_do_dpqphtype, "dphtype", x, prob, rates, log) pphtype <- function(q, prob, rates, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpqphtype, "pphtype", q, prob, rates, lower.tail, log.p) rphtype <- function(n, prob, rates) .External(C_actuar_do_randomphtype, "rphtype", n, prob, rates) mphtype <- function(order, prob, rates) .External(C_actuar_do_dpqphtype, "mphtype", order, prob, rates, FALSE) mgfphtype <- function(t, prob, rates, log = FALSE) .External(C_actuar_do_dpqphtype, "mgfphtype", t, prob, rates, log) actuar/R/mean.grouped.data.R0000644000176200001440000000130315147745722015364 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Mean of grouped data objects ### ### See Klugman, Panjer & Willmot, Loss Models, Wiley, 1998. ### ### AUTHOR: Vincent Goulet ## New method of base::mean generic for grouped data mean.grouped.data <- function(x, ...) 
{ ## Get group boundaries cj <- eval(expression(cj), envir = environment(x)) ## Compute group midpoints midpoints <- cj[-length(cj)] + diff(cj)/2 ## Extract frequencies columns by dropping the boundaries column; ## convert to matrix for use in crossprod() x <- as.matrix(x[-1L]) ## Compute mean per column drop(crossprod(x, midpoints))/colSums(x) } actuar/R/CTE.R0000644000176200001440000000345415147745722012514 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Conditional Tail Expectation for objects of class 'aggregateDist'. ### ### AUTHORS: Tommy Ouellet, Vincent Goulet CTE <- function(x, ...) UseMethod("CTE") CTE.aggregateDist <- function(x, conf.level = c(0.9, 0.95, 0.99), names = TRUE, ...) { chkDots(...) # method does not use '...' label <- comment(x) ## Normal approximation; an exact formula is available if (label == "Normal approximation") { m <- get("mean", environment(x)) sd <- sqrt(get("variance", environment(x))) res <- m + sd * dnorm(qnorm(conf.level)) / (1 - conf.level) } ## Normal Power approximation; explicit formula in Castaner, ## Claramunt and Marmol (2013) else if (label == "Normal Power approximation") { m <- get("mean", envir = environment(x)) sd <- sqrt(get("variance", envir = environment(x))) sk <- get("skewness", envir = environment(x)) q <- qnorm(conf.level) res <- m + sd * dnorm(q) * (1 + sk * q/6) / (1 - conf.level) } ## Recursive method, simulation and convolutions; each yield a ## step function that can be used to make calculations. 
else { val <- get("x", envir = environment(x)) prob <- get("fs", envir = environment(x)) f2 <- function(a) { pos <- val > VaR(x, a) drop(crossprod(val[pos], prob[pos])) / sum(prob[pos]) } res <- sapply(conf.level, f2) } if (names) { dig <- max(2, getOption("digits")) names(res) <- formatC(paste(100 * conf.level, "%", sep = ""), format = "fg", width = 1, digits = dig) } res } TVaR <- CTE actuar/R/ZeroModifiedPoisson.R0000644000176200001440000000147015147745722016030 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}zmpois functions to compute ### characteristics of the Zero Modified Poisson distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHOR: Vincent Goulet dzmpois <- function (x, lambda, p0, log = FALSE) .External(C_actuar_do_dpq, "dzmpois", x, lambda, p0, log) pzmpois <- function(q, lambda, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pzmpois", q, lambda, p0, lower.tail, log.p) qzmpois <- function(p, lambda, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qzmpois", p, lambda, p0, lower.tail, log.p) rzmpois <- function(n, lambda, p0) .External(C_actuar_do_random, "rzmpois", n, lambda, p0) actuar/R/rmixture.R0000644000176200001440000000367215147745722013762 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Simulation of discrete mixtures ### ### f(x) = p_1 f_1(x) + ... + p_n f_n(x). ### ### Uses the syntax of rcomphierarc() for model specfification. ### ### AUTHOR: Vincent Goulet rmixture <- function(n, probs, models, shuffle = TRUE) { ## Validity checks (similar to other r functions and to ## rmultinom). 
if (any(is.na(n)) || any(n < 0)) stop(sprintf("invalid first argument %s", sQuote("n"))) if (all(probs <= 0)) stop("no positive probabilities") if ((!is.expression(models)) || (length(models) == 0L)) stop(sprintf("invalid third argument %s", sQuote("models"))) ## Number of models in the mixture. m <- max(length(probs), length(models)) ## Number of variates to generate: 'length(n)' if length of 'n' is ## > 1, like other 'r' functions. if (length(n) > 1L) n <- length(n) ## Number of variates from each model. By definition of the ## multinomial distribution, sum(nj) == n. ## ## Note that 'rmultinom' will normalize probabilities to sum 1. nj <- rmultinom(1, size = n, prob = rep_len(probs, m)) ## Auxiliary function to generate 'n' variates from the model ## given in 'expr'. The expressions end up being evaluated three ## frames below the current one. f <- function(n, expr) { expr$n <- n eval.parent(expr, n = 3) } ## Simulate from each model the appropriate number of times and ## return result as an atomic vector. Variates are ordered by ## model: all random variates from model 1, then all random ## variates from model 2, and so on. x <- unlist(mapply(f, n = nj, expr = rep_len(models, m), SIMPLIFY = FALSE)) ## Return variates reshuffled or in the order above as per ## argument 'shuffle'. 
if (shuffle) x[sample.int(n)] else x } actuar/R/simS.R0000644000176200001440000000224115147745722013005 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Simulation of a aggregate claim amounts ### ### AUTHORS: Vincent Goulet ### and Louis-Philippe Pouliot simS <- function(n, model.freq, model.sev) { ## Prepare the call to simul() by building up 'nodes' level.names <- names(if (is.null(model.freq)) model.sev else model.freq) nlevels <- length(level.names) nodes <- as.list(c(rep(1, nlevels - 1), n)) names(nodes) <- level.names ## Get sample x <- aggregate(simul(nodes = nodes, model.freq = model.freq, model.sev = model.sev))[-1] ## Compute the empirical cdf of the sample. Done manually instead ## of calling stats::ecdf to keep a copy of the empirical pmf in ## the environment without computing it twice. x <- sort(x) vals <- unique(x) fs <- tabulate(match(x, vals))/length(x) FUN <- approxfun(vals, pmin(cumsum(fs), 1), method = "constant", yleft = 0, yright = 1, f = 0, ties = "ordered") class(FUN) <- c("ecdf", "stepfun", class(FUN)) assign("fs", fs, envir = environment(FUN)) FUN } actuar/R/InverseBurr.R0000644000176200001440000000311315147745722014337 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}burr functions to compute ### characteristics of the Inverse Burr distribution. The version used ### in these functions has cumulative distribution function ### ### Pr[X <= x] = (u/(1 + u))^shape1, u = (x/scale)^shape2, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dinvburr <- function (x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dinvburr", x, shape1, shape2, scale, log) pinvburr <- function(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvburr", q, shape1, shape2, scale, lower.tail, log.p) qinvburr <- function(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qinvburr", p, shape1, shape2, scale, lower.tail, log.p) rinvburr <- function(n, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rinvburr", n, shape1, shape2, scale) minvburr <- function(order, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "minvburr", order, shape1, shape2, scale, FALSE) levinvburr <- function(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levinvburr", limit, shape1, shape2, scale, order, FALSE) actuar/R/bayes.R0000644000176200001440000001533115147745722013201 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Pure bayesian credibility calculations. ### ### AUTHORS: Alexandre Parent , ### Vincent Goulet bayes <- function(x, likelihood = c("poisson", "bernoulli", "geometric", "exponential", "normal", "binomial", "negative binomial", "gamma", "pareto"), shape, rate = 1, scale = 1/rate, shape1, shape2, mean = 0, sd = 1, size, shape.lik, sd.lik, min) { likelihood <- match.arg(likelihood) ## We need to treat separately the (Single Parameter, or ## Translated) Pareto/Gamma case given the different form of the ## individual mean and the "credibility factor" (which isn't one, ## really). 
if (likelihood == "pareto") { if (missing(min)) stop("lower bound of the likelihood missing") if (missing(shape) || (missing(rate) && missing(scale))) stop(sprintf("one of the Gamma prior parameter %s, %s or %s missing", dQuote("shape"), dQuote("rate"), dQuote("scale"))) coll <- shape * scale vars <- c(NA, NA) # not pertinent here ## Computation of individual means and credibility factors ## differs depending on the type of data provided in argument. if (is.null(x)) # no data cred <- ind.means <- n <- 0 else if (is.vector(x, mode = "numeric")) # atomic vector { n <- length(x) sumlog <- sum(log(x)) - n * log(min) ind.means <- n/sumlog cred <- 1/(1 + 1/(scale * sumlog)) } else # matrix or data frame { n <- ncol(x) sumlog <- rowSums(log(x)) - n * log(min) ind.means <- n/sumlog cred <- 1/(1 + 1/(scale * sumlog)) } } ## Now the usual linear Bayes cases. else { if (likelihood == "poisson") { if (missing(shape) || (missing(rate) && missing(scale))) stop(sprintf("one of the Gamma prior parameter %s, %s or %s missing", dQuote("shape"), dQuote("rate"), dQuote("scale"))) coll <- shape * scale vars <- c(coll * scale, coll) K <- 1/scale } else if (likelihood == "bernoulli") { if (missing(shape1) || missing(shape2)) stop(sprintf("one of the Beta prior parameter %s or %s missing", dQuote("shape1"), dQuote("shape2"))) K <- shape1 + shape2 coll <- shape1/K vars <- (shape1 * shape2) * c(1, K)/(K^2 * (K + 1)) } else if (likelihood == "binomial") { if (missing(shape1) || missing(shape2)) stop(sprintf("one of the Beta prior parameter %s or %s missing", dQuote("shape1"), dQuote("shape2"))) if (missing(size)) stop(sprintf("parameter %s of the likelihood missing", dQuote("size"))) K <- (shape1 + shape2)/size coll <- shape1/K vars <- (shape1 * shape2) * c(1, K)/(K^2 * (shape1 + shape2 + 1)) } else if (likelihood == "geometric") { if (missing(shape1) || missing(shape2)) stop(sprintf("one of the Beta prior parameter %s or %s missing", dQuote("shape1"), dQuote("shape2"))) K <- shape1 - 1 coll 
<- shape2/K vars <- shape2 * (shape1 + shape2 - 1)/(K * (K - 1)) vars <- c(vars/K, vars) } else if (likelihood == "negative binomial") { if (missing(shape1) || missing(shape2)) stop(sprintf("one of the Beta prior parameter %s or %s missing", dQuote("shape1"), dQuote("shape2"))) if (missing(size)) stop(sprintf("parameter %s of the likelihood missing", dQuote("size"))) K <- (shape1 - 1)/size coll <- shape2/K vars <- shape2 * (shape1 + shape2 - 1)/(K * (shape1 - 2)) vars <- c(vars/K, vars) } else if (likelihood == "exponential") { if (missing(shape) || (missing(rate) && missing(scale))) stop(sprintf("one of the Gamma prior parameter %s, %s or %s missing", dQuote("shape"), dQuote("rate"), dQuote("scale"))) K <- shape - 1 coll <- 1/(K * scale) vars <- c(coll^2, coll/scale)/(shape - 2) } else if (likelihood == "gamma") { if (missing(shape) || (missing(rate) && missing(scale))) stop(sprintf("one of the Gamma prior parameter %s, %s or %s missing", dQuote("shape"), dQuote("rate"), dQuote("scale"))) if (missing(shape.lik)) stop(sprintf("parameter %s of the likelihood missing", dQuote("shape.lik"))) K <- (shape - 1)/shape.lik coll <- 1/(K * scale) vars <- c(coll^2, coll/scale)/(shape - 2) } else if (likelihood == "normal") { if (missing(sd.lik)) stop(sprintf("parameter %s of the likelihood missing", dQuote("sd.lik"))) coll <- mean vars <- c(sd, sd.lik)^2 K <- vars[2L]/vars[1L] } else stop("unsupported likelihood") ## Computation of individual means and credibility factors ## differs depending on the type of data provided in argument. 
if (is.null(x)) # no data cred <- ind.means <- n <- 0 else if (is.vector(x, mode = "numeric")) # atomic vector { n <- length(x) ind.means <- mean(x) cred <- n/(n + K) } else # matrix or data frame { n <- ncol(x) ind.means <- rowMeans(x) cred <- n/(n + K) } } structure(list(means = list(coll, ind.means), weights = list(NULL, n), unbiased = vars, iterative = NULL, cred = cred, nodes = 1L), class = "bayes", model = "Linear Bayes") } ## Premium calculation is identical to the Buhlmann-Straub case; no ## need for another method. See bstraub.R for the definition. # predict.bayes <- predict.bstraub actuar/R/betaint.R0000644000176200001440000000123015147745722013515 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### The "beta integral" ### ### B(a, b; x) = Gamma(a + b) int_0^x t^(a-1) (1 - t)^(b-1) dt ### ### a > 0, b != -1, -2, ..., 0 < x < 1. This mathematical function is ### only used at the C level in the package. The R function therein ### provides an R interface just in case it could be useful. ### ### The function is *not* exported. ### ### See Appendix A of Klugman, Panjer and Willmot (2012), Loss Models, ### Fourth Edition, Wiley. ### ### AUTHOR: Vincent Goulet ## see src/betaint.c betaint <- function(x, a, b) .External(C_actuar_do_betaint, x, a, b) actuar/R/aggregateDist.R0000644000176200001440000002065215147745722014652 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Use one of five methods to compute the aggregate claim amount ### distribution of a portfolio over a period given a frequency and a ### severity model or the true moments of the distribution. 
### ### AUTHORS: Vincent Goulet , ### Louis-Philippe Pouliot aggregateDist <- function(method = c("recursive", "convolution", "normal", "npower", "simulation"), model.freq = NULL, model.sev = NULL, p0 = NULL, x.scale = 1, convolve = 0, moments, nb.simul, ..., tol = 1e-06, maxit = 500, echo = FALSE) { Call <- match.call() ## The method used essentially tells which function should be ## called for the calculation of the aggregate claims ## distribution. method <- match.arg(method) if (method == "normal") { ## An error message is issued if the number of moments listed ## is not appropriate for the method. However it is the user's ## responsability to list the moments in the correct order ## since the vector is not required to be named. if (missing(moments) || length(moments) < 2) stop(sprintf("%s must supply the mean and variance of the distribution", sQuote("moments"))) FUN <- normal(moments[1], moments[2]) comment(FUN) <- "Normal approximation" } else if (method == "npower") { if (missing(moments) || length(moments) < 3) stop(sprintf("%s must supply the mean, variance and skewness of the distribution", sQuote("moments"))) FUN <- npower(moments[1], moments[2], moments[3]) comment(FUN) <- "Normal Power approximation" } else if (method == "simulation") { if (missing(nb.simul)) stop(sprintf("%s must supply the number of simulations", sQuote("nb.simul"))) if (is.null(names(model.freq)) && is.null(names(model.sev))) stop(sprintf("expressions in %s and %s must be named", sQuote("model.freq"), sQuote("model.sev"))) FUN <- simS(nb.simul, model.freq = model.freq, model.sev = model.sev) comment(FUN) <- "Approximation by simulation" } else { ## "recursive" and "convolution" cases. Both require a ## discrete distribution of claim amounts, that is a vector of ## probabilities in argument 'model.sev'. if (!is.numeric(model.sev)) stop(sprintf("%s must be a vector of probabilities", sQuote("model.sev"))) ## Recursive method uses a model for the frequency distribution. 
if (method == "recursive") { if (is.null(model.freq) || !is.character(model.freq)) stop("frequency distribution must be supplied as a character string") dist <- match.arg(tolower(model.freq), c("poisson", "geometric", "negative binomial", "binomial", "logarithmic", "zero-truncated poisson", "zero-truncated geometric", "zero-truncated negative binomial", "zero-truncated binomial", "zero-modified logarithmic", "zero-modified poisson", "zero-modified geometric", "zero-modified negative binomial", "zero-modified binomial")) FUN <- panjer(fx = model.sev, dist = dist, p0 = p0, x.scale = x.scale, ..., convolve = convolve, tol = tol, maxit = maxit, echo = echo) comment(FUN) <- "Recursive method approximation" } ## Convolution method requires a vector of probabilites in ## argument 'model.freq'. else if (method == "convolution") { if (!is.numeric(model.freq)) stop(sprintf("%s must be a vector of probabilities", sQuote("model.freq"))) FUN <- exact(fx = model.sev, pn = model.freq, x.scale = x.scale) comment(FUN) <- "Exact calculation (convolutions)" } else stop("internal error") } ## Return cumulative distribution function class(FUN) <- c("aggregateDist", class(FUN)) attr(FUN, "call") <- Call FUN } print.aggregateDist <- function(x, ...) { cat("\nAggregate Claim Amount Distribution\n") cat(" ", label <- comment(x), "\n\n", sep = "") cat("Call:\n") print(attr(x, "call"), ...) cat("\n") if (label %in% c("Exact calculation (convolutions)", "Recursive method approximation", "Approximation by simulation")) { n <- length(get("x", envir = environment(x))) cat("Data: (", n, "obs. 
)\n") numform <- function(x) paste(formatC(x, digits = 4, width = 5), collapse = ", ") i1 <- 1L:min(3L, n) i2 <- if (n >= 4L) max(4L, n - 1L):n else integer() xx <- eval(expression(x), envir = environment(x)) cat(" x[1:", n, "] = ", numform(xx[i1]), if (n > 3L) ", ", if (n > 5L) " ..., ", numform(xx[i2]), "\n", sep = "") cat("\n") } if (label %in% c("Normal approximation", "Normal Power approximation")) cat(attr(x, "source"), "\n") invisible(x) } plot.aggregateDist <- function(x, xlim, ylab = expression(F[S](x)), main = "Aggregate Claim Amount Distribution", sub = comment(x), ...) { ## Function plot() is used for the step cdfs and function curve() ## in the continuous cases. if ("stepfun" %in% class(x)) { ## Method for class 'ecdf' will most probably be used. NextMethod(main = main, ylab = ylab, ...) } else { ## Limits for the x-axis are supplied if none are given ## in argument. if (missing(xlim)) { mean <- get("mean", envir = environment(x)) sd <- sqrt(get("variance", envir = environment(x))) xlim <- c(mean - 3 * sd, mean + 3 * sd) } curve(x, main = main, ylab = ylab, xlim = xlim, ylim = c(0, 1), ...) } mtext(sub, line = 0.5) } summary.aggregateDist <- function(object, ...) structure(object, class = c("summary.aggregateDist", class(object)), ...) print.summary.aggregateDist <- function(x, ...) { cat(ifelse(comment(x) %in% c("Normal approximation", "Normal Power approximation"), "Aggregate Claim Amount CDF:\n", "Aggregate Claim Amount Empirical CDF:\n")) q <- quantile(x, p = c(0.25, 0.5, 0.75)) expectation <- mean(x) if (comment(x) %in% c("Normal approximation", "Normal Power approximation")) { min <- 0 max <- NA } else { max <- tail(eval(expression(x), environment(x)), 1) min <- head(eval(expression(x), environment(x)), 1) } res <- c(min, q[c(1, 2)], expectation, q[3], max) names(res) <- c("Min.", "1st Qu.", "Median", "Mean", "3rd Qu.", "Max.") print(res, ...) invisible(x) } mean.aggregateDist <- function(x, ...) 
{ label <- comment(x) ## Simply return the value of the true mean given in argument in ## the case of the Normal and Normal Power approximations. if (label %in% c("Normal approximation", "Normal Power approximation")) return(get("mean", envir = environment(x))) ## For the recursive, exact and simulation methods, compute the ## mean from the stepwise cdf using the pmf saved in the ## environment of the object. drop(crossprod(get("x", envir = environment(x)), get("fs", envir = environment(x)))) } diff.aggregateDist <- function(x, ...) { label <- comment(x) ## The 'diff' method is defined for the recursive, exact and ## simulation methods only. if (label == "Normal approximation" || label == "Normal Power approximation") stop("function not defined for approximating distributions") ## The probability vector is already stored in the environment of ## the "aggregateDist" object. get("fs", environment(x)) } actuar/R/hache.origin.R0000644000176200001440000001334315147745722014435 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Auxiliary function to fit regression credibility model using the ### original Hachemeister model. ### ### AUTHORS: Tommy Ouellet, Vincent Goulet ### codetools does not like the way 'coll1' is defined in ### 'hache.origin' below. Avoid false positive in R CMD check. if (getRversion() >= "2.15.1") utils::globalVariables(c("coll1")) hache.origin <- function(ratios, weights, xreg, tol, maxit, echo) { ## Frequently used values weights.s <- rowSums(weights, na.rm = TRUE) # contract total weights has.data <- which(weights.s > 0) # contracts with data ncontracts <- nrow(ratios) # number of contracts eff.ncontracts <- length(has.data) # effective number of contracts p <- ncol(xreg) # rank (>= 2) of design matrix n <- nrow(xreg) # number of observations ## Fit linear model to each contract. For contracts without data, ## fit some sort of empty model to ease use in predict.hache(). 
f <- function(i) { z <- if (i %in% has.data) # contract with data { y <- ratios[i, ] not.na <- !is.na(y) lm.wfit(xreg[not.na, , drop = FALSE], y[not.na], weights[i, not.na]) } else # contract without data lm.fit(xreg, rep.int(0, n)) z[c("coefficients", "residuals", "weights", "rank", "qr")] } fits <- lapply(seq_len(ncontracts), f) ## Individual regression coefficients ind <- sapply(fits, coef) ind[is.na(ind)] <- 0 ## Individual variance estimators. The contribution of contracts ## without data is 0. S <- function(z) # from stats::summary.lm { nQr <- NROW(z$qr$qr) r1 <- z$rank r <- z$residuals w <- z$weights sum(w * r^2) / (nQr - r1) } sigma2 <- sapply(fits[has.data], S) sigma2[is.nan(sigma2)] <- 0 ## Initialization of a few containers: p x p x ncontracts arrays ## for the weight and credibility matrices; p x p matrices for the ## between variance-covariance matrix and total credibility ## matrix. cred <- W <- array(0, c(p, p, ncontracts)) A <- cred.s <- matrix(0, p, p) ## Weight matrices: we use directly (X'WX)^{-1}. This is quite ## different from hache.barycenter(). V <- function(z) # from stats::summary.lm { r1 <- z$rank if (r1 == 1L) diag(as.double(chol2inv(z$qr$qr[1L, 1L, drop = FALSE])), p) else chol2inv(z$qr$qr[1L:r1, 1L:r1, drop = FALSE]) } W[, , has.data] <- sapply(fits[has.data], V) ## Starting credibility matrices and collective regression ## coefficients. cred[, , has.data] <- diag(p) # identity matrices coll <- rowSums(ind) / eff.ncontracts # coherent with above ## === ESTIMATION OF WITHIN VARIANCE === s2 <- mean(sigma2) ## === ESTIMATION OF THE BETWEEN VARIANCE-COVARIANCE MATRIX === ## ## This is an iterative procedure similar to the Bischel-Straub ## estimator. Following Goovaerts & Hoogstad, stopping criterion ## is based in the collective regression coefficients estimates. ## ## If printing of iterations was asked for, start by printing a ## header and the starting values. 
if (echo) { cat("Iteration\tCollective regression coefficients\n") exp <- expression(cat(" ", count, "\t\t ", coll1 <- coll, fill = TRUE)) } else exp <- expression(coll1 <- coll) ## Iterative procedure count <- 0 repeat { eval(exp) ## Stop after 'maxit' iterations if (maxit < (count <- count + 1)) { warning("maximum number of iterations reached before obtaining convergence") break } ## Calculation of the between variance-covariance matrix. A[] <- rowSums(sapply(has.data, function(i) cred[, , i] %*% tcrossprod(ind[, i] - coll))) / (eff.ncontracts - 1) ## Symmetrize A A <- (A + t(A))/2 ## New credibility matrices cred[, , has.data] <- sapply(has.data, function(i) A %*% solve(A + s2 * W[, , i])) ## New collective regression coefficients cred.s <- apply(cred[, , has.data], c(1L, 2L), sum) coll <- solve(cred.s, rowSums(sapply(has.data, function(i) cred[, , i] %*% ind[, i]))) ## Test for convergence if (max(abs((coll - coll1)/coll1)) < tol) break } ## Final calculation of the between variance-covariance matrix and ## credibility matrices. A[] <- rowSums(sapply(has.data, function(i) cred[, , i] %*% tcrossprod(ind[, i] - coll))) / (eff.ncontracts - 1) A <- (A + t(A))/2 cred[, , has.data] <- sapply(has.data, function(i) A %*% solve(A + s2 * W[, , i])) ## Credibility adjusted coefficients. The coefficients of the ## models are replaced with these values. That way, prediction ## will be trivial using predict.lm(). for (i in seq_len(ncontracts)) fits[[i]]$coefficients <- coll + drop(cred[, , i] %*% (ind[, i] - coll)) ## Add names to the collective coefficients vector. 
names(coll) <- rownames(ind) ## Results list(means = list(coll, ind), weights = list(cred.s, W), unbiased = NULL, iterative = list(A, s2), cred = cred, nodes = list(ncontracts), adj.models = fits) } actuar/R/hist.grouped.data.R0000644000176200001440000000431215147745722015416 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Histogram for grouped data ### ### See Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHORS: Vincent Goulet , Mathieu Pigeon hist.grouped.data <- function(x, freq = NULL, probability = !freq, density = NULL, angle = 45, col = NULL, border = NULL, main = paste("Histogram of", xname), xlim = range(x), ylim = NULL, xlab = xname, ylab, axes = TRUE, plot = TRUE, labels = FALSE, ...) { ## We keep the first frequencies column only; group boundaries are ## in the environment of 'x' y <- x[, 2L] x <- eval(expression(cj), envir = environment(x)) ## If any frequency is non finite, omit the group keep <- which(is.finite(y)) y <- y[keep] x <- x[c(1L, keep + 1L)] ## Some useful values n <- sum(y) # total number of observations h <- diff(x) # group widths dens <- y/(n * h) # group "densities" ## Cannot plot histogram with infinite group if (any(is.infinite(x))) stop("infinite group boundaries") ## The rest is taken from hist.default() xname <- paste(deparse(substitute(x), 500), collapse = "\n") equidist <- diff(range(h)) < 1e-07 * mean(h) if (is.null(freq)) { freq <- if (!missing(probability)) !as.logical(probability) else equidist } else if (!missing(probability) && any(probability == freq)) stop(sprintf("%s is an alias for %s, however they differ.", sQuote("probability"), sQuote("!freq"))) mids <- 0.5 * (x[-1L] + x[-length(x)]) r <- structure(list(breaks = x, counts = y, intensities = dens, density = dens, mids = mids, xname = xname, equidist = equidist), class = "histogram") if (plot) { plot(r, freq = freq, col = col, border = border, angle = angle, density = density, main = main, xlim = xlim, ylim = ylim, 
xlab = xlab, ylab = ylab, axes = axes, labels = labels, ...) invisible(r) } else r } actuar/R/hache.barycenter.R0000644000176200001440000001370115147745722015302 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Auxiliary function to fit regression credibility model by ### positioning the intercept at the barycenter of time. ### ### AUTHORS: Xavier Milhaud, Vincent Goulet hache.barycenter <- function(ratios, weights, xreg, method, tol, maxit, echo) { ## Frequently used values weights.s <- rowSums(weights, na.rm = TRUE) # contract total weights has.data <- which(weights.s > 0) # contracts with data ncontracts <- nrow(ratios) # number of contracts eff.ncontracts <- length(has.data) # effective number of contracts p <- ncol(xreg) # rank (>= 2) of design matrix n <- nrow(xreg) # number of observations ## Putting the intercept at the barycenter of time amounts to use ## a "weighted orthogonal" design matrix in the regression (that ## is, X'WX = I for some diagonal weight matrix W). In theory, ## there would be one orthogonal design matrix per contract. In ## practice, we orthogonalize with a "collective" barycenter. We ## use average weights per period across contracts since these ## will be closest to the their individual analogues. ## ## We orthogonalize the original design matrix using QR ## decomposition. We also keep matrix R as a transition matrix ## between the original base and the orthogonal base. w <- colSums(weights, na.rm = TRUE)/sum(weights.s) Xqr <- qr(xreg * sqrt(w)) # QR decomposition R <- qr.R(Xqr) # transition matrix x <- qr.Q(Xqr) / sqrt(w) # weighted orthogonal matrix ## Fit linear model to each contract. For contracts without data, ## fit some sort of empty model to ease use in predict.hache(). 
f <- function(i) { z <- if (i %in% has.data) # contract with data { y <- ratios[i, ] not.na <- !is.na(y) lm.wfit(x[not.na, , drop = FALSE], y[not.na], weights[i, not.na]) } else # contract without data lm.fit(x, rep.int(0, n)) z[c("coefficients", "residuals", "weights", "rank", "qr")] } fits <- lapply(seq_len(ncontracts), f) ## Individual regression coefficients ind <- sapply(fits, coef) ind[is.na(ind)] <- 0 ## Individual variance estimators. The contribution of contracts ## without data is 0. S <- function(z) # from stats::summary.lm { nQr <- NROW(z$qr$qr) rank <- z$rank r <- z$residuals w <- z$weights sum(w * r^2) / (nQr - rank) } sigma2 <- sapply(fits[has.data], S) sigma2[is.nan(sigma2)] <- 0 ## Initialization of a few containers: p x p x ncontracts arrays ## for the weight and credibility matrices; p x p matrices for the ## between variance-covariance matrix and total weight matrix; a ## vector of length p for the collective regression coefficients. cred <- W <- array(0, c(p, p, ncontracts)) A <- W.s <- matrix(0, p, p) coll <- numeric(p) ## Weight matrices: we need here only the diagonal elements of ## X'WX, where W = diag(w_{ij}) (and not w_{ij}/w_{i.} as in the ## orthogonalization to keep a w_{i.} lying around). The first ## element is w_{i.} and the off-diagonal elements are zero by ## construction. Note that array W is quite different from the one ## in hache.origin(). W[1, 1, ] <- weights.s for (i in 2:p) W[i, i, has.data] <- colSums(t(weights[has.data, ]) * x[, i]^2, na.rm = TRUE) ## === ESTIMATION OF THE WITHIN VARIANCE === s2 <- mean(sigma2) ## === ESTIMATION OF THE BETWEEN VARIANCE-COVARIANCE MATRIX === ## ## By construction, we only estimate the diagonal of the matrix. ## Variance components are estimated just like in the ## Buhlmann-Straub model (see bstraub.R for details). ## ## Should we compute the iterative estimators? 
do.iter <- method == "iterative" && diff(range(weights, na.rm = TRUE)) > .Machine$double.eps^0.5 ## Do the computations one regression parameter at a time. for (i in seq_len(p)) { ## Unbiased estimator a <- A[i, i] <- bvar.unbiased(ind[i, has.data], W[i, i, has.data], s2, eff.ncontracts) ## Iterative estimator if (do.iter) { a <- A[i, i] <- if (a > 0) bvar.iterative(ind[i, has.data], W[i, i, has.data], s2, eff.ncontracts, start = a, tol = tol, maxit = maxit, echo = echo) else 0 } ## Credibility factors and estimator of the collective ## regression coefficients. if (a > 0) { z <- cred[i, i, has.data] <- 1/(1 + s2/(W[i, i, has.data] * a)) z. <- W.s[i, i] <- sum(z) coll[i] <- drop(crossprod(z, ind[i, has.data])) / z. } else { ## (credibility factors were already initialized to 0) w <- W[i, i, has.data] w. <- W.s[i, i] <- sum(w) coll[i] <- drop(crossprod(w, ind[i, ])) / w. } } ## Credibility adjusted coefficients. The coefficients of the ## models are replaced with these values. That way, prediction ## will be trivial using predict.lm(). for (i in seq_len(ncontracts)) fits[[i]]$coefficients <- coll + drop(cred[, , i] %*% (ind[, i] - coll)) ## Add names to the collective coefficients vector. names(coll) <- rownames(ind) ## Results list(means = list(coll, ind), weights = list(W.s, W), unbiased = if (method == "unbiased") list(A, s2), iterative = if (method == "iterative") list(A, s2), cred = cred, nodes = list(ncontracts), adj.models = fits, transition = R) } actuar/R/grouped.data.R0000644000176200001440000001252315147745722014453 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Creation of grouped data objects. ### ### The function can create a grouped data object from two types of ### arguments. ### ### 1. Individual data. The call has at least two elements in '...'. ### The first is then the vector of group boundaries and the others ### are vectors (or a matrix) of group frequencies. ### ### 2. Group boundaries and frequencies. 
The call has one or more ### elements in '...' and either 'breaks' or 'nclass' is provided ### or 'group' is TRUE. In this case, elements of '...' are grouped ### using graphics::hist automatically based on the first element ### of '...', or with group boundaries 'breaks' if the latter is a ### vector. ### ### For details on grouped data, see Klugman, Panjer & Willmot, Loss ### Models, Wiley, 1998. ### ### AUTHORS: Vincent Goulet , ### Mathieu Pigeon, Louis-Philippe Pouliot ### ### CREDITS: Manipulation and creation of names taken in part from R ### function data.frame(). Arguments, 'breaks', 'nclass' and their ### treatment taken from R function 'hist'. grouped.data <- function(..., breaks = "Sturges", include.lowest = TRUE, right = TRUE, nclass = NULL, group = FALSE, row.names = NULL, check.rows = FALSE, check.names = TRUE) { ## Utility function to format numbers. numform <- function(x, w) formatC(x, digits = 2, width = w, format = "fg") ## Keep the calls in '...' in object 'ox', and the evaluated ## elements in '...' in object 'x'. ox <- as.list(substitute(list(...)))[-1L] x <- list(...) xlen <- length(x) # number of arguments in '...' use.br <- !missing(breaks) # 'breaks' specified ## If any elements of '...' are unnamed, set names based on the ## variable name provided in the function call (e.g. f(x) -> "x") ## or from the deparsed expression (e.g. f(1:3) -> "1:3"). xnames <- names(x) if(length(xnames) != xlen) xnames <- character(xlen) no.xn <- !nzchar(xnames) if (any(no.xn)) { for (i in which(no.xn)) xnames[i] <- deparse(ox[[i]], nlines = 1L)[1L] names(x) <- xnames } ## Single argument implies individual data. if (xlen == 1L) group <- TRUE ## Avoid using calling 'hist' with 'nclass' specified. 
if (use.br)
    {
        if (!missing(nclass))
            warning(sprintf("%s not used when %s is specified",
                            sQuote("nclass"), sQuote("breaks")))
        if (!(missing(group) || group))
            warning(sprintf("%s ignored when %s is specified",
                            sQuote("group"), sQuote("breaks")))
        group <- TRUE
    }
    else if (!is.null(nclass) && length(nclass) == 1L)
    {
        breaks <- nclass
        if (!(missing(group) || group))
            warning(sprintf("%s ignored when %s is specified",
                            sQuote("group"), sQuote("nclass")))
        group <- TRUE
    }

    if (group)          # individual data in argument; group with 'hist'
    {
        ## Set group boundaries (and the first set of group
        ## frequencies) using the first argument in '...'.
        y <- hist(x[[1]], plot = FALSE, breaks = breaks,
                  include.lowest = include.lowest, right = right)
        br <- y$breaks
        y <- y$counts

        ## If there are other vectors in '...', compute group
        ## frequencies using 'hist' with the group boundaries
        ## determined above. If 'breaks' were set automatically, there
        ## is a great risk of error, but we can't do much better.
        if (xlen > 1)
        {
            f <- function(x, br)
                hist(x, plot = FALSE, breaks = br,
                     include.lowest = include.lowest, right = right)$counts
            y <- cbind(y, sapply(x[-1], f, br = br))
        }
        y <- as.data.frame(y)
        x <- as.data.frame(br)
        names(y) <- xnames
        xnames <- ""
        nx <- nrow(x)
    }
    else                # group boundaries and frequencies in argument
    {
        y <- as.data.frame(x[-1L])      # group frequencies
        x <- as.data.frame(x[[1L]])     # group boundaries
        nx <- nrow(x)

        ## There must be exactly one more group boundary than frequencies.
        if (nx - nrow(y) != 1L)
            stop("invalid number of group boundaries and frequencies")

        ## Replace missing frequencies by zeros.
        ## BUG FIX: the NA replacement must apply to the frequencies
        ## 'y', not the group boundaries 'x' (the previous code zeroed
        ## NA boundaries, corrupting the class limits while leaving NA
        ## frequencies in the result).
        nay <- is.na(y)
        if (any(nay))
        {
            y[nay] <- 0
            warning("missing frequencies replaced by zeros")
        }
    }

    ## Return a data frame with formatted group boundaries in the
    ## first column.
w <- max(nchar(x[-1L, ]))       # longest upper boundary
    ## Group labels of the form "(lower, upper]" (or "[lower, upper)"
    ## when right = FALSE).
    xfmt <- paste(if (right) "(" else "[",
                  numform(x[-nx, ], -1),
                  ", ",
                  numform(x[-1L, ], w),
                  if (right) "]" else ")",
                  sep = "")
    res <- data.frame(xfmt, y,
                      row.names = row.names,
                      check.rows = check.rows,
                      check.names = check.names)
    names(res) <- c(xnames[1L], names(y))
    class(res) <- c("grouped.data", "data.frame")
    ## The numeric group boundaries are stashed as object 'cj' in a
    ## dedicated environment attached to the result.
    environment(res) <- new.env()
    assign("cj", unlist(x, use.names = FALSE), environment(res))
    attr(res, "right") <- right
    res
}
actuar/R/ZeroTruncatedGeometric.R0000644000176200001440000000141215147745722016521 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r}ztgeom functions to compute
### characteristics of the Zero Truncated Geometric distribution.
###
### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Wiley.
###
### AUTHOR: Vincent Goulet 

## All computations are delegated to the C workhorses registered
## under C_actuar_do_dpq and C_actuar_do_random.
dztgeom <- function(x, prob, log = FALSE)
    .External(C_actuar_do_dpq, "dztgeom", x, prob, log)

pztgeom <- function(q, prob, lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "pztgeom", q, prob, lower.tail, log.p)

qztgeom <- function(p, prob, lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "qztgeom", p, prob, lower.tail, log.p)

rztgeom <- function(n, prob)
    .External(C_actuar_do_random, "rztgeom", n, prob)
actuar/R/InverseGamma.R0000644000176200001440000000341715147745722014456 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r,m,lev,mgf}invgamma functions to compute
### characteristics of the Inverse Gamma distribution. The version
### used in these functions has cumulative distribution function
###
###   Pr[X <= x] = 1 - pgamma(scale/x, shape, scale = 1)
###
### or, equivalently,
###
###   Pr[X <= x] = 1 - pgamma(1/x, shape, scale = 1/scale).
### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Second ### Edition, Wiley, 2004 and ### ### ### AUTHORS: Mathieu Pigeon, Christophe Dutang and ### Vincent Goulet dinvgamma <- function (x, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dinvgamma", x, shape, scale, log) pinvgamma <- function(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvgamma", q, shape, scale, lower.tail, log.p) qinvgamma <- function(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qinvgamma", p, shape, scale, lower.tail, log.p) rinvgamma <- function(n, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rinvgamma", n, shape, scale) minvgamma <- function(order, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "minvgamma", order, shape, scale, FALSE) levinvgamma <- function(limit, shape, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levinvgamma", limit, shape, scale, order, FALSE) mgfinvgamma <- function(t, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "mgfinvgamma", t, shape, scale, log) actuar/R/VaR.R0000644000176200001440000000106315147745722012563 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of a generic function for the Value at Risk. Currently, ### there is no default method for this function. The only method is ### for objects of class 'aggregateDist'. ### ### AUTHORS: Tommy Ouellet, Vincent Goulet VaR <- function(x, ...) UseMethod("VaR") VaR.aggregateDist <- function(x, conf.level = c(0.9, 0.95, 0.99), smooth = FALSE, names = TRUE, ...) quantile.aggregateDist(x, conf.level, smooth, names, ...) actuar/R/ogive.R0000644000176200001440000001077015147745722013211 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Ogive for grouped data. 
### ### A default method exists for either a vector of individual data, or ### two vectors of group boundaries and group frequencies. It first ### creates a grouped data object using 'grouped.data' and then calls ### a utility function to create the ogive. ### ### For the definition of the ogive, see Klugman, Panjer & Willmot, ### Loss Models, Wiley, 1998. ### ### More details on the admissible arguments for the default method ### are to be found in ./grouped.data.R. ### ### AUTHORS: Vincent Goulet , ### Mathieu Pigeon ### ### CREDITS: Arguments, 'breaks', 'nclass' and their treatment taken ### from R function hist(). ogive <- function(x, ...) UseMethod("ogive") ogive.default <- function(x, y = NULL, breaks = "Sturges", nclass = NULL, ...) { chkDots(...) # method does not use '...' Call <- match.call() if (exists(".Generic", inherits = FALSE)) Call[[1]] <- as.name(.Generic) ## Avoid using calling 'hist' with 'nclass' specified. if (!missing(breaks)) { if (!missing(nclass)) warning(sprintf("%s not used when %s is specified", sQuote("nclass"), sQuote("breaks"))) } else if (!is.null(nclass) && length(nclass) == 1L) breaks <- nclass ## Create the "grouped.data" object. x <- if (is.null(y)) # one argument: individual data grouped.data(x, breaks = breaks) else # two arguments: boundaries and frequencies grouped.data(x, y) ## Group frequencies in the second column of the data frame; group ## boundaries in the environment of 'x'. y <- x[, 2L] x <- eval(expression(cj), envir = environment(x)) ## Create an object of class 'ogive'. res <- .ogiveFUN(x, y) attr(res, "call") <- Call res } ogive.grouped.data <- function(x, ...) { chkDots(...) # method does not use '...' Call <- match.call() if (exists(".Generic", inherits = FALSE)) Call[[1]] <- as.name(.Generic) ## We keep the first frequencies column only; group boundaries are ## in the environment of 'x' y <- x[, 2L] x <- eval(expression(cj), envir = environment(x)) ## Create an object of class 'ogive'. 
res <- .ogiveFUN(x, y) attr(res, "call") <- Call res } .ogiveFUN <- function(x, y) { FUN <- approxfun(x, cumsum(c(0, y)) / sum(y), yleft = 0, yright = 1, method = "linear", ties = "ordered") class(FUN) <- c("ogive", class(FUN)) FUN } ### Essentially identical to stats:::print.ecdf. print.ogive <- function(x, digits = getOption("digits") - 2, ...) { ## Utility function numform <- function(x) paste(formatC(x, digits = digits), collapse = ", ") ## The rest is adapted from stats::ecdf cat("Ogive for grouped data \nCall: ") print(attr(x, "call"), ...) nc <- length(xxc <- get("x", envir = environment(x))) nn <- length(xxn <- get("y", envir = environment(x))) i1 <- 1L:min(3L, nc) i2 <- if (nc >= 4L) max(4L, nc - 1L):nc else integer(0) i3 <- 1L:min(3L, nn) i4 <- if (nn >= 4L) max(4L, nn - 1L):nn else integer(0) cat(" x = ", numform(xxc[i1]), if (nc > 3L) ", ", if (nc > 5L) " ..., ", numform(xxc[i2]), "\n", sep = "") cat(" F(x) = ", numform(xxn[i3]), if (nn > 3L) ", ", if (nn > 5L) " ..., ", numform(xxn[i4]), "\n", sep = "") invisible(x) } ### Essentially identical to stats:::summary.ecdf. summary.ogive <- function (object, ...) { n <- length(eval(expression(x), envir = environment(object))) header <- paste("Ogive: ", n, "unique values with summary\n") structure(summary(knots(object), ...), header = header, class = "summary.ogive") } ### Identical to stats:::print.summary.ecdf. print.summary.ogive <- function(x, ...) { cat(attr(x, "header")) y <- x; attr(y, "header") <- NULL; class(y) <- "summaryDefault" print(y, ...) invisible(x) } ### Identical to stats:::knots.stepfun. knots.ogive <- function(Fn, ...) eval(expression(x), envir = environment(Fn)) plot.ogive <- function(x, main = NULL, xlab = "x", ylab = "F(x)", ...) 
{ if (missing(main)) main <- { cl <- attr(x, "call") deparse(if (!is.null(cl)) cl else sys.call()) } kn <- knots(x) Fn <- x(kn) plot(kn, Fn, ..., type = "o", pch = 16, main = main, xlab = xlab, ylab = ylab) } actuar/R/ZeroModifiedBinomial.R0000644000176200001440000000154215147745722016130 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}zmbinom functions to compute ### characteristics of the Zero Modified Binomial distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHOR: Vincent Goulet dzmbinom <- function (x, size, prob, p0, log = FALSE) .External(C_actuar_do_dpq, "dzmbinom", x, size, prob, p0, log) pzmbinom <- function(q, size, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pzmbinom", q, size, prob, p0, lower.tail, log.p) qzmbinom <- function(p, size, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qzmbinom", p, size, prob, p0, lower.tail, log.p) rzmbinom <- function(n, size, prob, p0) .External(C_actuar_do_random, "rzmbinom", n, size, prob, p0) actuar/R/GammaSupp.R0000644000176200001440000000153715147745722013773 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {m,lev,mgf}gamma functions to compute raw and ### limited moments, and the moment generating function for ### the Gamma distribution (as defined in R) ### ### See Chapter 17 of Johnson & Kotz, Continuous univariate ### distributions, volume 1, Wiley, 1970 ### ### AUTHORS: Mathieu Pigeon, Christophe Dutang, ### Vincent Goulet mgamma <- function(order, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mgamma", order, shape, scale, FALSE) levgamma <- function(limit, shape, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levgamma", limit, shape, scale, order, FALSE) mgfgamma <- function(t, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, 
"mgfgamma", t, shape, scale, log) actuar/R/Loglogistic.R0000644000176200001440000000260715147745722014357 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}llogis functions to compute ### characteristics of the loglogistic distribution. The version used ### in these functions has cumulative distribution function ### ### Pr[X <= x] = v/(1 + v), v = (x/scale)^shape, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dllogis <- function (x, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dllogis", x, shape, scale, log) pllogis <- function(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pllogis", q, shape, scale, lower.tail, log.p) qllogis <- function(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qllogis", p, shape, scale, lower.tail, log.p) rllogis <- function(n, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rllogis", n, shape, scale) mllogis <- function(order, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mllogis", order, shape, scale, FALSE) levllogis <- function(limit, shape, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levllogis", limit, shape, scale, order, FALSE) actuar/R/InverseParalogistic.R0000644000176200001440000000277615147745722016064 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}invparalogis functions to compute ### characteristics of the Inverse Paralogistic distribution. The ### version used in these functions has cumulative distribution ### function ### ### Pr[X <= x] = (u/(1 + u))^shape, u = (x/scale)^shape, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dinvparalogis <- function (x, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dinvparalogis", x, shape, scale, log) pinvparalogis <- function(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvparalogis", q, shape, scale, lower.tail, log.p) qinvparalogis <- function(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qinvparalogis", p, shape, scale, lower.tail, log.p) rinvparalogis <- function(n, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rinvparalogis", n, shape, scale) minvparalogis <- function(order, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "minvparalogis", order, shape, scale, FALSE) levinvparalogis <- function(limit, shape, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levinvparalogis", limit, shape, scale, order, FALSE) actuar/R/ExponentialSupp.R0000644000176200001440000000140115147745722015225 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {m,lev,mgf}exp functions to compute raw and ### limited moments, and the moment generating function for the ### Exponential distribution (as defined in R). 
### ### See Chapter 18 of Johnson & Kotz, Continuous univariate ### distributions, volume 1, Wiley, 1970 ### ### AUTHORS: Mathieu Pigeon, Christophe Dutang, ### Vincent Goulet mexp <- function(order, rate = 1) .External(C_actuar_do_dpq, "mexp", order, 1/rate, FALSE) levexp <- function(limit, rate = 1, order = 1) .External(C_actuar_do_dpq, "levexp", limit, 1/rate, order, FALSE) mgfexp <- function(t, rate = 1, log = FALSE) .External(C_actuar_do_dpq, "mgfexp", t, 1/rate, log) actuar/R/InversePareto.R0000644000176200001440000000237015147745722014663 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m}invpareto functions to compute ### characteristics of the Inverse Pareto distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = (x/(x + scale))^shape, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. ### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dinvpareto <- function(x, shape, scale, log = FALSE) .External(C_actuar_do_dpq, "dinvpareto", x, shape, scale, log) pinvpareto <- function(q, shape, scale, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvpareto", q, shape, scale, lower.tail, log.p) qinvpareto <- function(p, shape, scale, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qinvpareto", p, shape, scale, lower.tail, log.p) rinvpareto <- function(n, shape, scale) .External(C_actuar_do_random, "rinvpareto", n, shape, scale) minvpareto <- function(order, shape, scale) .External(C_actuar_do_dpq, "minvpareto", order, shape, scale, FALSE) levinvpareto <- function(limit, shape, scale, order = 1) .External(C_actuar_do_dpq, "levinvpareto", limit, shape, scale, order, FALSE) actuar/R/hache.R0000644000176200001440000000605215147745722013146 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Credibility in the regression case using the Hachemeister (1975) ### 
model with possibly an adjustment to put the intercept at the ### barycenter of time (see Buhlmann & Gisler, 2005). ### ### AUTHORS: Xavier Milhaud, Tommy Ouellet, Vincent Goulet ### hache <- function(ratios, weights, formula, data, adj.intercept = FALSE, method = c("unbiased", "iterative"), tol = sqrt(.Machine$double.eps), maxit = 100, echo = FALSE) { Call <- match.call() ## If weights are not specified, use equal weights as in ## Buhlmann's model. if (missing(weights)) { if (any(is.na(ratios))) stop("missing ratios not allowed when weights are not supplied") weights <- array(1, dim(ratios)) } ## Check other bad arguments. if (NCOL(ratios) < 2) stop("there must be at least one node with more than one period of experience") if (NROW(ratios) < 2) stop("there must be more than one node") if (!identical(which(is.na(ratios)), which(is.na(weights)))) stop("missing values are not in the same positions in 'weights' and in 'ratios'") if (all(!weights, na.rm = TRUE)) stop("no available data to fit model") ## Build the design matrix mf <- model.frame(formula, data, drop.unused.levels = TRUE) mt <- attr(mf, "terms") xreg <- model.matrix(mt, mf) ## Do computations in auxiliary functions. res <- if (adj.intercept) hache.barycenter(ratios, weights, xreg, method = match.arg(method), tol = tol, maxit = maxit, echo = echo) else hache.origin(ratios, weights, xreg, tol = tol, maxit = maxit, echo = echo) ## Add the terms object to the result for use in predict.hache() ## [and thus predict.lm()]. res$terms <- mt ## Results attr(res, "class") <- "hache" attr(res, "model") <- "regression" res } predict.hache <- function(object, levels = NULL, newdata, ...) { ## If model was fitted at the barycenter of time (there is a ## transition matrix in the object), then also convert the ## regression coefficients in the base of the (original) design ## matrix. 
if (!is.null(R <- object$transition)) { for (i in seq_along(object$adj.models)) { b <- coefficients(object$adj.models[[i]]) object$adj.models[[i]]$coefficients <- solve(R, b) } } ## Prediction (credibility premiums) using predict.lm() on each of ## the adjusted individual models. This first requires to add a ## 'terms' component to each adjusted model. f <- function(z, ...) { z$terms <- object$terms class(z) <- "lm" # to keep predict.lm() quiet unname(predict.lm(z, ...)) } structure(sapply(object$adj.models, f, newdata = newdata), ...) } print.hache <- function(x, ...) print.default(x) actuar/R/NormalSupp.R0000644000176200001440000000116215147745722014173 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {m,mgf}norm functions to compute raw and the ### moment generating function for the Normal distribution (as defined ### in R). ### ### See Chapter 13 of Johnson & Kotz, Continuous univariate ### distributions, volume 1, Wiley, 1970 ### ### AUTHORS: Christophe Dutang, Vincent Goulet mnorm <- function(order, mean = 0, sd = 1) .External(C_actuar_do_dpq, "mnorm", order, mean, sd, FALSE) mgfnorm <- function(t, mean = 0, sd = 1, log = FALSE) .External(C_actuar_do_dpq, "mgfnorm", t, mean, sd, log) actuar/R/quantile.grouped.data.R0000644000176200001440000000240215147745722016267 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Quantiles (inverse of the ogive) for grouped data ### ### AUTHOR: Vincent Goulet ### Walter Garcia-Fontes quantile.grouped.data <- function(x, probs = seq(0, 1, 0.25), names = TRUE, ...) 
{ ## We keep the first frequencies column only; group boundaries are ## in the environment of 'x' y <- x[, 2L] x <- eval(expression(cj), envir = environment(x)) ## Inverse of the ogive fun <- approxfun(c(0, cumsum(y))/sum(y), x, yleft = min(x), yright = max(x), method = "linear", ties = "ordered") ## Quantiles res <- fun(probs) if (names) { dig <- max(2, getOption("digits")) names(res) <- formatC(paste(100 * probs, "%", sep = ""), format = "fg", width = 1, digits = dig) } res } summary.grouped.data <- function(object, ...) { ## Keep only the first frequencies column object <- object[1L:2L] res <- quantile(object) res <- c(res[1L:3L], mean(object), res[4L:5L]) names(res) <- c("Min.", "1st Qu.", "Median", "Mean", "3rd Qu.", "Max.") class(res) <- c("summaryDefault", "table") res } actuar/R/Burr.R0000644000176200001440000000300215147745722013000 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}burr functions to compute ### characteristics of the Burr distribution. The version used in ### these functions has cumulative distribution function ### ### Pr[X <= x] = 1 - (1/(1 + (x/scale)^shape2))^shape1, x > 0. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dburr <- function (x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dburr", x, shape1, shape2, scale, log) pburr <- function(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pburr", q, shape1, shape2, scale, lower.tail, log.p) qburr <- function(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qburr", p, shape1, shape2, scale, lower.tail, log.p) rburr <- function(n, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rburr", n, shape1, shape2, scale) mburr <- function(order, shape1, shape2, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mburr", order, shape1, shape2, scale, FALSE) levburr <- function(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levburr", limit, shape1, shape2, scale, order, FALSE) actuar/R/Pareto3.R0000644000176200001440000000265715147745722013422 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}pareto3 functions to compute ### characteristics of the Pareto (type) II distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = v/(1 + v), x > min, ### ### where v = ((x - min)/scale)^shape. ### ### See Arnold, B. C. (2015), Pareto Distributions, Second Edition, ### CRC Press. 
### ### AUTHOR: Vincent Goulet dpareto3 <- function (x, min, shape, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dpareto3", x, min, shape, scale, log) ppareto3 <- function (q, min, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "ppareto3", q, min, shape, scale, lower.tail, log.p) qpareto3 <- function (p, min, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qpareto3", p, min, shape, scale, lower.tail, log.p) rpareto3 <- function(n, min, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rpareto3", n, min, shape, scale) mpareto3 <- function(order, min, shape, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mpareto3", order, min, shape, scale, FALSE) levpareto3 <- function(limit, min, shape, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levpareto3", limit, min, shape, scale, order, FALSE) actuar/R/FellerPareto.R0000644000176200001440000000357715147745722014473 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}fpareto functions to compute ### characteristics of the Feller-Pareto distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = Pr[Y <= v/(1 + v)], x > min, ### ### where v = ((x - min)/scale)^shape2 and Y has a Beta distribution ### with parameters shape3 and shape1. ### ### See Arnold, B. C. (2015), Pareto Distributions, Second Edition, ### CRC Press. 
### ### AUTHORS: Nicholas Langevin, Vincent Goulet dfpareto <- function (x, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dfpareto", x, min, shape1, shape2, shape3, scale, log) pfpareto <- function (q, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pfpareto", q, min, shape1, shape2, shape3, scale, lower.tail, log.p) qfpareto <- function (p, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qfpareto", p, min, shape1, shape2, shape3, scale, lower.tail, log.p) rfpareto <- function (n, min, shape1, shape2, shape3, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rfpareto", n, min, shape1, shape2, shape3, scale) mfpareto <- function (order, min, shape1, shape2, shape3, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mfpareto", order, min, shape1, shape2, shape3, scale, FALSE) levfpareto <- function (limit, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levfpareto", limit, min, shape1, shape2, shape3, scale, order, FALSE) actuar/R/Logarithmic.R0000644000176200001440000000260715147745722014342 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,r}logarithmic functions to compute ### characteristics of the logarithmic distribution. The version used ### in these functions has probability mass function ### ### Pr[X = x] = -p^x/(x log(1 - p)), x = 1, 2, ... ### ### This is the standard parametrization in the literature; see for ### example https://en.wikipedia.org/wiki/Logarithmic_distribution. ### ### NOTE: Klugman, Panjer & Willmot (Loss Models) introduce the ### logarithmic distribution as a limiting case of the zero truncated ### negative binomial. In this setting, parameter 'p' above would be ### the probability of *failure* (a.k.a. 
q) of the zero truncated ### negative binomial. ### ### AUTHOR: Vincent Goulet dlogarithmic <- function(x, prob, log = FALSE) .External(C_actuar_do_dpq, "dlogarithmic", x, prob, log) plogarithmic <- function(q, prob, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "plogarithmic", q, prob, lower.tail, log.p) qlogarithmic <- function(p, prob, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qlogarithmic", p, prob, lower.tail, log.p) rlogarithmic <- function(n, prob) .External(C_actuar_do_random, "rlogarithmic", n, prob) ## not exported; for internal use in panjer() pgflogarithmic <- function(x, prob) log1p(-prob * x)/log1p(-prob) actuar/R/rcomphierarc.summaries.R0000644000176200001440000001527315147745722016565 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Computing summary statistics and accessing components of a ### portfolio. ### ### AUTHORS: Louis-Philippe Pouliot, Tommy Ouellet, ### Vincent Goulet aggregate.portfolio <- function(x, by = names(x$nodes), FUN = sum, classification = TRUE, prefix = NULL, ...) { level.names <- names(x$nodes) # level names nlevels <- length(level.names) # number of levels years <- level.names[nlevels] # name of last level ## Match level names in 'by' to those in the model by <- match.arg(by, level.names, several.ok = TRUE) ## Version of FUN able to work on lists fun <- function(x, ...) FUN(unlist(x), ...) ## The most common case should be to aggregate claim amounts by ## node. This case being very simple, it is treated separately. if (identical(by, level.names)) return(cbind(if (classification) x$classification, array(sapply(x$data, FUN, ...), dim(x$data), dimnames = list(NULL, paste(prefix, colnames(x$data), sep = ""))))) ## Summaries only by last level (years) are also simple to handle. if (identical(by, years)) { res <- apply(x$data, 2, fun, ...) 
names(res) <- paste(prefix, colnames(x$data), sep = "") return(res) } ## The other possibilities require to split the data in groups as ## specified in argument 'by'. If the last level (years) is in ## 'by', then the matrix structure must be retained to make the ## summaries. Otherwise, it can just be dropped since summaries ## will span the years of observation. ## ## Convert the sequence of subscripts into factors by pasting the ## digits together. It is important *not* to sort the levels in ## case the levels in 'by' are not in the same order as in ## 'level.names'. rows <- setdiff(by, years) # groups other than years s <- x$classification[, rows, drop = FALSE] # subscripts f <- apply(s, 1, paste, collapse = "") # grouping IDs f <- factor(f, levels = unique(f)) # factors s <- s[match(levels(f), f), , drop = FALSE] # unique subscripts xx <- split(x$data, f) # split data ## Make summaries if (years %in% by) { xx <- lapply(xx, matrix, ncol = ncol(x$data)) res <- t(sapply(xx, function(x, ...) apply(x, 2, fun, ...), ...)) cols <- colnames(x$data) } else { res <- sapply(xx, fun, ...) cols <- deparse(substitute(FUN)) } ## Return results as a matrix structure(cbind(if (classification) s, res), dimnames = list(NULL, c(if (classification) rows, paste(prefix, cols, sep = "")))) } frequency.portfolio <- function(x, by = names(x$nodes), classification = TRUE, prefix = NULL, ...) { chkDots(...) # method does not use '...' freq <- function(x) if (identical(x, NA)) NA else length(x[!is.na(x)]) aggregate(x, by, freq, classification, prefix) } severity.portfolio <- function(x, by = head(names(x$node), -1), splitcol = NULL, classification = TRUE, prefix = NULL, ...) { chkDots(...) # method does not use '...' 
level.names <- names(x$nodes) # level names ci <- seq_len(ncol(x$data)) # column indexes ## Match level names in 'by' to those in the model by <- match.arg(by, level.names, several.ok = TRUE) ## Sanity checks if (identical(by, level.names)) { warning("nothing to do") return(x) } ## Convert character 'splitcol' to numeric and then from numeric ## or NULL to boolean. if (is.character(splitcol)) splitcol <- pmatch(splitcol, colnames(x$data), duplicates.ok = TRUE) if (is.numeric(splitcol) || is.null(splitcol)) splitcol <- ci %in% splitcol ## Unroll claim amounts by column; simplest case if (tail(level.names, 1L) %in% by) { if (length(by) > 1L) stop(sprintf("invalid %s specification", sQuote("by"))) #x <- x$data res <- unroll(x$data, bycol = TRUE, drop = FALSE) colnames(res) <- paste(prefix, colnames(res), sep = "") return(list(main = res[, !splitcol], split = if (all(!splitcol)) NULL else res[, splitcol])) } ## Unrolling per row (or group of rows) is more work. It requires ## to split the columns of the matrix first, and then to apply the ## unrolling procedure twice (if 'splitcol' != NULL). ## ## Utility function fun <- function(x) unlist(x[!is.na(x)]) ## Split rows according to the 'by' argument. s <- x$classification[, by, drop = FALSE] # subscripts f <- apply(s, 1, paste, collapse = "") # grouping IDs f <- factor(f, levels = unique(f)) # factors s <- s[match(levels(f), f), , drop = FALSE] # unique subscripts ## Keep the 'splitcol' columns for later use. x.split <- x$data[, splitcol] ## If a prefix is not specified, use "claim." as a sensible ## choice. if (is.null(prefix)) prefix <- "claim." ## Unroll the "main" block of columns. 
if (all(splitcol)) res.main <- NULL else { x <- cbind(lapply(split(x$data[, !splitcol], f), fun)) res.main <- unroll(x, bycol = FALSE, drop = FALSE) res.main <- if (0L < (nc <- ncol(res.main))) { dimnames(res.main) <- list(NULL, paste(prefix, seq_len(nc), sep = "")) cbind(if (classification) s, res.main) } else NULL } ## Unroll the 'splitcol' block of columns. if (all(!splitcol)) res.split <- NULL else { x <- cbind(lapply(split(x.split, f), fun)) # split data res.split <- unroll(x, bycol = FALSE, drop = FALSE) res.split <- if (0L < (nc <- ncol(res.split))) { dimnames(res.split) <- list(NULL, paste(prefix, seq_len(nc), sep = "")) cbind(if (classification) s, res.split) } else NULL } ## Return the result as a list. list(main = res.main, split = res.split) } weights.portfolio <- function(object, classification = TRUE, prefix = NULL, ...) { chkDots(...) # method does not use '...' if (is.null(object$weights)) NULL else { w <- object$weights colnames(w) <- paste(prefix, colnames(w), sep = "") cbind(if (classification) object$classification, w) } } actuar/R/adjCoef.R0000644000176200001440000002060115147745722013425 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Compute the adjustment coefficient in ruin theory, that is the ### smallest (strictly) positive root of the Lundberg equation ### ### h(r) = E[e^(r X - r c W)] = 1, ### ### where X is the claim size random variable, W the inter-occurence ### time and c the premium rate. 
###
### AUTHORS: Christophe Dutang, Vincent Goulet

## adjCoef() computes the adjustment coefficient, i.e. the smallest
## strictly positive root of the Lundberg equation h(r) = 1 (see the
## file header above).  With no reinsurance the result is a single
## numeric value; with reinsurance it is a function of the retention
## level, built on a grid of 'n' points between 'from' and 'to'.
##
## Arguments:
##   mgf.claim, mgf.wait: moment generating functions of the claim
##       size X and of the inter-occurence time W, each given either
##       as a function name or as an expression in 'x' (and in 'y',
##       the retention level, for the reinsurance cases);
##   premium.rate: premium rate; a single value without reinsurance,
##       a function of 'y' with reinsurance;
##   upper.bound: upper bound for the root search;
##   h: direct specification of h(r); used iff 'mgf.claim' is missing;
##   reinsurance: type of reinsurance;
##   from, to, n: grid of retention levels (reinsurance cases only).
adjCoef <- function(mgf.claim, mgf.wait = mgfexp, premium.rate, upper.bound,
                    h, reinsurance = c("none", "proportional", "excess-of-loss"),
                    from, to, n = 101)
{
    reinsurance <- match.arg(reinsurance)

    ## Sanity check: at least one way to define h(r) must be supplied.
    if (missing(mgf.claim) && missing(h))
        stop(sprintf("one of %s or %s is needed",
                     sQuote("mgf.claim"), sQuote("h")))

    ## === NO REINSURANCE CASE ===
    ##
    ## Moment generating functions are unidimensional, premium rate
    ## and adjustment coefficient are both single numeric values.
    if (reinsurance == "none")
    {
        ## For each of 'mgf.claim', 'mgf.wait' and 'h' (if needed): if
        ## the expression is only the name of a function (say f),
        ## build a call 'f(x)'. Otherwise, check that the expression
        ## is a function call containing an 'x'. Taken from 'curve'
        ## and 'discretize'.
        ##
        ## NOTE: argument 'h' will be used iff 'mgf.claim' is missing,
        ## thereby giving priority to 'mgf.claim'.
        if (missing(mgf.claim))
        {
            sh <- substitute(h)
            if (is.name(sh))
            {
                fcall <- paste(sh, "(x)")
                ## evaluate the built call with 'x' bound locally and
                ## everything else looked up in the caller's frame
                h1 <- function(x)
                    eval(parse(text = fcall), envir = list(x = x),
                         enclos = parent.frame(2))
            }
            else
            {
                if (!(is.call(sh) && match("x", all.vars(sh), nomatch = 0L)))
                    stop(sprintf("%s must be a function or an expression containing %s",
                                 sQuote("h"), sQuote("x")))
                h1 <- function(x)
                    eval(sh, envir = list(x = x), enclos = parent.frame(2))
            }
        }
        else
        {
            smgfx <- substitute(mgf.claim)
            if (is.name(smgfx))
            {
                fcall <- paste(smgfx, "(x)")
                mgfx <- parse(text = fcall)
            }
            else
            {
                if (!(is.call(smgfx) && match("x", all.vars(smgfx), nomatch = 0L)))
                    stop(sprintf("%s must be a function or an expression containing %s",
                                 sQuote("mgf.claim"), sQuote("x")))
                mgfx <- smgfx
            }
            smgfw <- substitute(mgf.wait)
            if (is.name(smgfw))
            {
                fcall <- paste(smgfw, "(x)")
                mgfw <- parse(text = fcall)
            }
            else
            {
                if (!(is.call(smgfw) && match("x", all.vars(smgfw), nomatch = 0L)))
                    stop(sprintf("%s must be a function or an expression containing %s",
                                 sQuote("mgf.wait"), sQuote("x")))
                mgfw <- smgfw
            }
            ## h(r) = M_X(r) * M_W(-r c): the waiting time mgf is
            ## evaluated at -x * premium.rate
            h1 <- function(x)
                eval(mgfx) * eval(mgfw, list(x = -x * premium.rate))
        }
        ## Minimize (h(r) - 1)^2 to locate the root of h(r) = 1.
        f1 <- function(r) (h1(r) - 1)^2
        return(optimize(f1, c(0, upper.bound - .Machine$double.eps),
                        tol = sqrt(.Machine$double.eps))$minimum)
    }

    ## === WITH REINSURANCE CASES ===
    ##
    ## Claim amount moment generating function is a function of 'x'
    ## and the retention level 'y', inter-occurrence time moment
    ## generating function is a function of 'x', premium rate and
    ## adjustment coefficient are both functions of the retention
    ## level 'y'.
    ##
    ## Do same as in the no reinsurance case for each of 'mgf.claim',
    ## 'mgf.wait' and 'h' (if needed) and also 'premium'. The first
    ## must be functions of 'x' and 'y', whereas the last one is a
    ## function of 'y' only.
    if (missing(mgf.claim))
    {
        sh <- substitute(h)
        if (is.name(sh))
        {
            fcall <- paste(sh, "(x, y)")
            h2 <- function(x, y)
                eval(parse(text = fcall), envir = list(x = x, y = y),
                     enclos = parent.frame(2))
        }
        else
        {
            if (!(is.call(sh) && all(match(c("x", "y"), all.vars(sh), nomatch = 0L))))
                stop(sprintf("%s must be a function or an expression containing %s and %s",
                             sQuote("h"), sQuote("x"), sQuote("y")))
            h2 <- function(x, y)
                eval(sh, envir = list(x = x, y = y),
                     enclos = parent.frame(2))
        }
    }
    else
    {
        if (!is.function(premium.rate))
            stop(sprintf("%s must be a function when using reinsurance",
                         sQuote("premium.rate")))
        smgfx <- substitute(mgf.claim)
        if (is.name(smgfx))
        {
            fcall <- paste(smgfx, "(x, y)")
            mgfx <- parse(text = fcall)
        }
        else
        {
            if (!(is.call(smgfx) && all(match(c("x", "y"), all.vars(smgfx), nomatch = 0L))))
                stop(sprintf("%s must be a function or an expression containing %s and %s",
                             sQuote("mgf.claim"), sQuote("x"), sQuote("y")))
            mgfx <- smgfx
        }
        smgfw <- substitute(mgf.wait)
        if (is.name(smgfw))
        {
            fcall <- paste(smgfw, "(x)")
            mgfw <- parse(text = fcall)
        }
        else
        {
            if (!(is.call(smgfw) && match("x", all.vars(smgfw), nomatch = 0L)))
                stop(sprintf("%s must be a function or an expression containing %s",
                             sQuote("mgf.wait"), sQuote("x")))
            mgfw <- smgfw
        }
        spremium <- substitute(premium.rate)
        if (is.name(spremium))
        {
            fcall <- paste(spremium, "(y)")
            premium.rate <- parse(text = fcall)
        }
        else
        {
            if (!(is.call(spremium) && match("y", all.vars(spremium), nomatch = 0L)))
                stop(sprintf("%s must be a function or an expression containing %s",
                             sQuote("premium.rate"), sQuote("y")))
            premium.rate <- spremium
        }
        h2 <- function(x, y)
            eval(mgfx) * eval(mgfw, list(x = -x * eval(premium.rate)))
    }
    f2 <- function(x, y) (h2(x, y) - 1)^2

    retention <- seq(from, to, length.out = n)

    ## Compute the adjustment coefficient for each retention level.
    ## The output of 'sapply' is a matrix with minima in the first
    ## line.
    ##
    ## The sapply() below passes the retention levels (argument 'y' of
    ## function 'f') to optimize(). Since the first two arguments ('f'
    ## and 'interval') of the latter function are specified, the
    ## retention levels end up in '...' and hence are considered as
    ## second argument of 'f'. *This requires R >= 2.6.0 to work since
    ## argument '...' comes much earlier in the definition of
    ## optimize().
    coef <- sapply(retention, optimize, f = f2,
                   interval = c(0, upper.bound - .Machine$double.eps),
                   tol = sqrt(.Machine$double.eps))[1L, ]

    ## Make a function from the (retention, coefficient) pairs
    ## computed above, joining the points by straight line segments.
    FUN <- approxfun(retention, coef, rule = 2, method = "linear")
    ## The comment attribute (e.g. "Proportional reinsurance") is used
    ## as a subtitle by plot.adjCoef() below.
    comment(FUN) <- paste(toupper(substring(reinsurance, 1L, 1L)),
                          substring(reinsurance, 2L),
                          " reinsurance", sep = "", collapse = "")
    class(FUN) <- c("adjCoef", class(FUN))
    attr(FUN, "call") <- sys.call()
    FUN
}

## Plot method for "adjCoef" objects: draws the adjustment coefficient
## as a function of the retention level.  The (x, y) data pairs are
## recovered from the environment of the approxfun() result created in
## adjCoef() above.
plot.adjCoef <- function(x, xlab = "x", ylab = "R(x)",
                         main = "Adjustment Coefficient", sub = comment(x),
                         type = "l", add = FALSE, ...)
{
    xx <- eval(expression(x), envir = environment(x))
    yy <- eval(expression(y), envir = environment(x))
    if (add)
        ## NOTE(review): 'main', 'xlab' and 'ylab' are also passed to
        ## lines(), which has no use for them -- presumably harmless,
        ## but confirm no warnings are emitted on this path.
        lines(xx, yy, ..., main = main, xlab = xlab, ylab = ylab, type = type)
    else
        plot(xx, yy, ..., main = main, xlab = xlab, ylab = ylab, type = type)
    mtext(sub, line = 0.5)
}
actuar/R/Paralogistic.R0000644000176200001440000000267515147745722014516 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r,m,lev}paralogis functions to compute
### characteristics of the paralogistic distribution. The version used
### in these functions has cumulative distribution function
###
###   Pr[X <= x] = 1 - (1/(1 + (x/scale)^shape))^shape, x > 0.
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
###
### AUTHORS: Mathieu Pigeon, Vincent Goulet

## All computations are delegated to the C workhorses registered as
## C_actuar_do_dpq (density/cdf/quantile/moment functions) and
## C_actuar_do_random (random generation).

## Density.
dparalogis <- function (x, shape, rate = 1, scale = 1/rate, log = FALSE)
    .External(C_actuar_do_dpq, "dparalogis", x, shape, scale, log)

## Cumulative distribution function.
pparalogis <- function(q, shape, rate = 1, scale = 1/rate,
                       lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "pparalogis", q, shape, scale,
              lower.tail, log.p)

## Quantile function.
qparalogis <- function(p, shape, rate = 1, scale = 1/rate,
                       lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "qparalogis", p, shape, scale,
              lower.tail, log.p)

## Random generation.
rparalogis <- function(n, shape, rate = 1, scale = 1/rate)
    .External(C_actuar_do_random, "rparalogis", n, shape, scale)

## Raw moment of order 'order'.
mparalogis <- function(order, shape, rate = 1, scale = 1/rate)
    .External(C_actuar_do_dpq, "mparalogis", order, shape, scale, FALSE)

## Limited expected value E[min(X, limit)^order].
levparalogis <- function(limit, shape, rate = 1, scale = 1/rate,
                         order = 1)
    .External(C_actuar_do_dpq, "levparalogis", limit, shape, scale,
              order, FALSE)
actuar/R/rcomphierarc.R0000644000176200001440000003115015147745722014551 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Simulation of hierarchical portfolios of data. Claim number and
### claim amounts in any given node are simulated independently.
### Both frequency and severity models can be mixtures of
### distributions.
###
### In the code here, numbering of levels starts at 1 at the data
### level, whereas in common discussion of hierarchical models the
### data level is numbered 0.
###
### AUTHORS: Vincent Goulet,
### Sebastien Auclair and Louis-Philippe Pouliot

## rcomphierarc() simulates a hierarchical portfolio of claim data and
## returns an object of class "portfolio".
##
## Arguments:
##   nodes: (named) list giving the number of nodes at each level;
##   model.freq, model.sev: expressions, one element per level, giving
##       the frequency and severity simulation models (either may be
##       NULL, but not both);
##   weights: optional vector of weights for the data matrix.
rcomphierarc <- function(nodes, model.freq = NULL, model.sev = NULL,
                         weights = NULL)
{
    ## Get level names. Each could be NULL.
    level.names <- names(nodes)
    freq.names <- names(model.freq)
    sev.names <- names(model.sev)

    ## 'nodes' must be a named list. One exception is allowed: there
    ## is only one level. In this case, add a predetermined name if
    ## there isn't one already and make sure 'nodes' is a list.
    if (length(nodes) == 1L)
    {
        if (is.null(level.names))
            names(nodes) <- "X"
        nodes <- as.list(nodes)
    }
    else
    {
        if (!is.list(nodes) || is.null(level.names))
            stop(sprintf("%s must be a named list", sQuote("nodes")))
    }

    ## Determine if frequency and severity models are present. Keep
    ## for future use.
    has.freq <- !all(sapply(model.freq, is.null))
    has.sev <- !all(sapply(model.sev, is.null))

    ## Check that at least one of 'model.freq' or 'model.sev' is
    ## present and that the level names match with those of 'nodes'.
    ## Perhaps is there a fancier way to do all these tests, but the
    ## version below is at least easy to follow.
    if (has.freq)
    {
        if (has.sev)
        {
            if (!(identical(level.names, freq.names) && identical(level.names, sev.names)))
                stop(sprintf("level names different in %s, %s and %s",
                             sQuote("nodes"), sQuote("model.freq"),
                             sQuote("model.sev")))
        }
        else
        {
            if (!identical(level.names, freq.names))
                stop(sprintf("level names different in %s, %s and %s",
                             sQuote("nodes"), sQuote("model.freq"),
                             sQuote("model.sev")))
        }
    }
    else
    {
        if (has.sev)
        {
            if (!identical(level.names, sev.names))
                stop(sprintf("level names different in %s, %s and %s",
                             sQuote("nodes"), sQuote("model.freq"),
                             sQuote("model.sev")))
        }
        else
            stop(sprintf("one of %s or %s must be non-NULL",
                         sQuote("model.freq"), sQuote("model.sev")))
    }

    ## The function is written for models with at least two levels
    ## (entity and year). If there is only one, add a dummy level to
    ## avoid scattering the code with conditions.
    if (length(nodes) < 2L)
    {
        nodes <- c(node = 1, nodes)
        model.freq <- if (has.freq)
            c(expression(node = NULL), model.freq)
        else
            NULL
        model.sev <- if (has.sev)
            c(expression(node = NULL), model.sev)
        else
            NULL
    }

    ## Frequently used quantities
    level.names <- names(nodes)         # need to reset!
    nlevels <- length(nodes)            # number of levels

    ## Recycling of the number of nodes (if needed) must be done
    ## "manually". We do it here once and for all since in any case
    ## below we will need to know the total number of nodes in the
    ## portfolio. Furthermore, the recycled list 'nodes' will be
    ## returned by the function.
    for (i in 2L:nlevels)               # first node doesn't need recycling
        nodes[[i]] <- rep(nodes[[i]], length = sum(nodes[[i - 1L]]))

    ## Simulation of the frequency mixing parameters for each level
    ## (e.g. class, contract) and, at the last level, the actual
    ## frequencies. If 'model.freq' is NULL, this is equivalent to
    ## having one claim per node.
    if (has.freq)
    {
        ## Normally, only the immediately above mixing parameter will
        ## be used in the model for a level, but the code here allows
        ## for more general schemes. For this to work, all mixing
        ## parameters have to be correctly recycled at each level
        ## where they *could* be used. Since the model at any level
        ## could be NULL, 'params' will keep track of the mixing
        ## parameters that were simulated in previous iteration of the
        ## forthcoming loop.
        params <- character(0)

        for (i in seq_len(nlevels))
        {
            ## Number of nodes at the current level
            n.current <- nodes[[i]]

            ## Extract simulation model for the level.
            Call <- model.freq[[i]]

            ## Repeat the mixing parameters of all levels above the
            ## current one that were simulated in the past.
            ## (eval/substitute rebinds each parameter, recycled, in
            ## the current frame.)
            for (j in seq_along(params))
                eval(substitute(x <- rep.int(x, n.current),
                                list(x = as.name(params[[j]]))))

            ## Simulate data only if there is a model at the current
            ## level.
            if (!is.null(Call))
            {
                ## Add the number of variates to the call.
                Call$n <- sum(n.current)

                ## Simulation of the mixing parameters or the data. In
                ## the latter case, store the results in a fixed
                ## variable name.
                if (i < nlevels)
                {
                    assign(level.names[[i]], eval(Call))
                    params[i] <- level.names[[i]] # remember the parameter
                }
                else
                    frequencies <- eval(Call)
            }
        }
    }
    else
        frequencies <- rep.int(1, sum(nodes[[nlevels]]))

    ## Simulation of the claim amounts. If 'model.sev' is NULL, this
    ## is equivalent to simulating frequencies only.
    if (has.sev)
    {
        ## Repeat the same procedure as for the frequency model, with
        ## one difference: when reaching the last level (claim
        ## amounts), the number of variates to simulate is not given
        ## by the number of nodes but rather by the number of claims
        ## as found in 'frequencies'.
        params <- character(0)

        for (i in seq_len(nlevels))
        {
            n.current <- nodes[[i]]
            Call <- model.sev[[i]]

            for (j in seq_along(params))
                eval(substitute(x <- rep.int(x, n.current),
                                list(x = as.name(params[[j]]))))

            if (!is.null(Call))
            {
                ## The rest of the procedure differs depending if we
                ## are still simulating mixing parameters or claim
                ## amounts.
                if (i < nlevels)
                {
                    ## Simulation of mixing parameters is identical to the
                    ## simulation of frequencies.
                    Call$n <- sum(n.current)
                    assign(level.names[[i]], eval(Call))
                    params[i] <- level.names[[i]]
                }
                else
                {
                    ## For the simulation of claim amounts, the number
                    ## of variates is rather given by the
                    ## 'frequencies' object. Furthermore, the mixing
                    ## parameters must be recycled once more to match
                    ## the vector of frequencies.
                    for (p in intersect(all.vars(Call), params))
                        eval(substitute(x <- rep.int(x, frequencies),
                                        list(x = as.name(p))))
                    Call$n <- sum(frequencies)
                    severities <- eval(Call)
                }
            }
        }
    }
    else
        severities <- rep.int(1, sum(frequencies))

    ## We must now distribute the claim amounts in vector 'severities'
    ## to the appropriate nodes. This is complicated by the
    ## possibility to have different number of nodes (years of
    ## observation) for each entity. The result must be a matrix
    ## with the number of columns equal to the maximum number of last
    ## level nodes.
    ##
    ## The number of nodes (years of observation) per entity is
    ## given by 'n.current' since we reached the last level in (either
    ## one of) the above loops.
    ##
    ## Assign a unique ID to each node, leaving gaps for nodes without
    ## observations.
    ind <- unlist(mapply(seq,
                         from = seq(by = max(n.current), along = n.current),
                         length = n.current))

    ## Repeating the vector of IDs according to the frequencies
    ## effectively assigns a node ID to each claim amount. The vector
    ## of claim amounts is then split by node, yielding a list where
    ## each element corresponds to a node with claims.
    f <- rep.int(ind, frequencies)
    severities <- split(severities, f)

    ## Identify nodes with frequency equal to 0, which is different
    ## from having no observation (NA).
    freq0 <- ind[which(frequencies == 0)]

    ## Rearrange the list of claim amounts in a matrix;
    ##
    ## number of rows: number of nodes at the penultimate level
    ##     (number of entities)
    ## number of columns: maximum number of nodes at the last level
    ##     (number of years of observation).
    ##
    ## Moreover, assign a value of 'numeric(0)' to nodes with a
    ## frequency of 0.
    nrow <- length(n.current)           # number of entities
    ncol <- max(n.current)              # number of years
    res <- as.list(rep.int(NA, nrow * ncol))
    res[unique(f)] <- severities
    res[freq0] <- lapply(rep.int(0, length(freq0)), numeric)
    res <- matrix(res, nrow, ncol, byrow = TRUE,
                  dimnames = list(NULL,
                                  paste(level.names[nlevels],
                                        seq_len(ncol), sep = ".")))

    ## Reshape weights as a matrix, if necessary.
    weights <- if (is.null(weights))
        NULL
    else
    {
        ## Integrate NAs into the weights matrix as appropriate.
        w <- rep.int(NA, nrow * ncol)
        w[ind] <- weights
        matrix(w, nrow = nrow, byrow = TRUE, dimnames = dimnames(res))
    }

    ## Finally, create a matrix where each row contains the series of
    ## identifiers for an entity in the portfolio, e.g. if the data
    ## is denoted X_{ijkt}, one line of the matrix will contain
    ## subscripts i, j and k. As we move from right to left in the
    ## columns of 'm', the subscripts are increasingly repeated.
    ncol <- nlevels - 1L
    m <- matrix(1, nrow, ncol,
                dimnames = list(NULL, head(level.names, ncol)))
    for (i in seq_len(ncol - 1L))       # all but the last column
    {
        ## Vector 'x' will originally contain all subscripts for one
        ## level. These subscripts are then repeated as needed to give
        ## the desired result. To avoid another explicit loop, I use a
        ## 'lapply' with a direct assignment in the current
        ## frame. Somewhat unusual, but this is the simplest procedure
        ## I managed to come up with.
        x <- unlist(lapply(nodes[[i]], seq))
        lapply(nodes[(i + 1L):(nlevels - 1L)],
               function(v) assign("x", rep.int(x, v), envir = parent.frame(2)))
        m[, i] <- x
    }
    m[, ncol] <- unlist(lapply(nodes[[ncol]], seq)) # last column

    ## Return object of class 'portfolio'
    structure(list(data = res,
                   weights = weights,
                   classification = m,
                   nodes = nodes,
                   model.freq = model.freq,
                   model.sev = model.sev),
              class = "portfolio")
}

### Alias for backward compatibility with actuar < 2.0-0.
simul <- rcomphierarc

### 'print' method for 'portfolio' objects

## Prints a short summary of the portfolio: the frequency and severity
## models (when present) and the matrix of claim counts per node.
## Returns the object invisibly, as print methods should.
print.portfolio <- function(x, ...)
{
    cat("\nPortfolio of claim amounts \n\n")
    nn <- names(x$nodes)
    nc <- max(nchar(nn))                # width to align level names
    if (!is.null(x$model.freq))
    {
        cat(" Frequency model\n")
        cat(paste(" ", format(nn, width = nc), " ~ ",
                  x$model.freq, "\n", sep = ""), sep = "")
    }
    if (!is.null(x$model.sev))
    {
        cat(" Severity model\n")
        cat(paste(" ", format(nn, width = nc), " ~ ",
                  x$model.sev, "\n", sep = ""), sep = "")
    }
    cat("\n Number of claims per node: \n\n")
    ## frequency() dispatches to the portfolio method (defined
    ## elsewhere in the package)
    print(frequency(x), ...)
    invisible(x)
}
actuar/R/ChisqSupp.R0000644000176200001440000000140515147745722014012 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {m,lev,mgf}chisq functions to compute raw and
### limited moments, and the moment generating function for
### the Chi-square distribution (as defined in R)
###
### See Chapter 17 of Johnson & Kotz, Continuous univariate
### distributions, volume 1, Wiley, 1970
###
### AUTHORS: Christophe Dutang, Vincent Goulet

## Raw moment of order 'order'; computed in C (C_actuar_do_dpq).
mchisq <- function(order, df, ncp = 0)
    .External(C_actuar_do_dpq, "mchisq", order, df, ncp, FALSE)

## Limited expected value E[min(X, limit)^order].
levchisq <- function(limit, df, ncp = 0, order = 1)
    .External(C_actuar_do_dpq, "levchisq", limit, df, ncp, order, FALSE)

## Moment generating function evaluated at 't'.
mgfchisq <- function(t, df, ncp = 0, log = FALSE)
    .External(C_actuar_do_dpq, "mgfchisq", t, df, ncp, log)
actuar/R/PoissonInverseGaussian.R0000644000176200001440000000215415147745722016556 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r}poisinvgauss functions to compute
### characteristics of the Poisson-Inverse Gaussian discrete
### distribution.
###
### AUTHOR: Vincent Goulet

## All computations are delegated to the C workhorses registered as
## C_actuar_do_dpq and C_actuar_do_random.  The distribution is
## parametrized by 'mean' and either 'shape' or its reciprocal
## 'dispersion' (= 1/shape).

## Probability mass function.
dpoisinvgauss <- function(x, mean, shape = 1, dispersion = 1/shape,
                          log = FALSE)
    .External(C_actuar_do_dpq, "dpoisinvgauss", x, mean, dispersion, log)

## Cumulative distribution function.
ppoisinvgauss <- function(q, mean, shape = 1, dispersion = 1/shape,
                          lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "ppoisinvgauss", q, mean, dispersion,
              lower.tail, log.p)

## Quantile function.
qpoisinvgauss <- function(p, mean, shape = 1, dispersion = 1/shape,
                          lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "qpoisinvgauss", p, mean, dispersion,
              lower.tail, log.p)

## Random generation.
rpoisinvgauss <- function(n, mean, shape = 1, dispersion = 1/shape)
    .External(C_actuar_do_random, "rpoisinvgauss", n, mean, dispersion)

## Aliases
dpig <- dpoisinvgauss
ppig <- ppoisinvgauss
qpig <- qpoisinvgauss
rpig <- rpoisinvgauss
actuar/R/BetaMoments.R0000644000176200001440000000117115147745722014311 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {m,lev}beta functions to compute raw and limited
### moments for the Beta distribution (as defined in R). The
### noncentral beta distribution is _not_ supported.
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
###
### AUTHOR: Vincent Goulet

## Raw moment of order 'order'; computed in C (C_actuar_do_dpq).
mbeta <- function(order, shape1, shape2)
    .External(C_actuar_do_dpq, "mbeta", order, shape1, shape2, FALSE)

## Limited expected value E[min(X, limit)^order].
levbeta <- function(limit, shape1, shape2, order = 1)
    .External(C_actuar_do_dpq, "levbeta", limit, shape1, shape2, order, FALSE)
actuar/R/normal.R0000644000176200001440000000301215147745722013357 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Normal and Normal Power Approximation of the total amount of
### claims distribution
###
### See Daykin, Pentikainen and Pesonen, Practical Risk Theory for
### Actuaries, Chapman & Hall, 1994.
###
### AUTHORS: Vincent Goulet
### and Louis-Philippe Pouliot

## normal() returns the cdf of the Normal approximation of the total
## claim amount distribution, given its first two moments.  The
## returned function carries 'mean' and 'variance' in its own
## environment, so it is self-contained.
normal <- function(mean, variance)
{
    ## Approximate the total amount of claims distribution using the first
    ## two moments.
    FUN <- function(x) pnorm(x, mean = mean, sd = sqrt(variance))
    environment(FUN) <- new.env()
    assign("mean", mean, envir = environment(FUN))
    assign("variance", variance, envir = environment(FUN))
    ## human-readable rendering of the function, kept as an attribute
    attr(FUN, "source") <- "function(x) pnorm(x, mean = mean, sd = sqrt(variance))"
    FUN
}

## npower() returns the cdf of the Normal Power approximation of the
## total claim amount distribution, given its first three moments.
## The approximation is defined for x > mean only; NA is returned
## below that.
npower <- function(mean, variance, skewness)
{
    ## Approximate the total amount of claims distribution using the first
    ## three moments.
    FUN <- function(x) ifelse(x <= mean, NA,
                              pnorm(sqrt(1 + 9/skewness^2 + 6 * (x - mean)/(sqrt(variance) * skewness)) - 3/skewness))
    environment(FUN) <- new.env()
    assign("mean", mean, envir = environment(FUN))
    assign("variance", variance, envir = environment(FUN))
    assign("skewness", skewness, envir = environment(FUN))
    attr(FUN, "source") <- "function(x) ifelse(x <= mean, NA, pnorm(sqrt(1 + 9/skewness^2 + 6 * (x - mean)/(sqrt(variance) * skewness)) - 3/skewness))"
    FUN
}
actuar/R/discretize.R0000644000176200001440000001033115147745722014236 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Function to discretize a continuous distribution using various
### methods.
###
### AUTHOR: Vincent Goulet

## discretize() returns the vector of probability masses obtained by
## discretizing the distribution with cdf 'cdf' on [from, to] with
## step 'step' (alias 'by'), using one of four methods.  'cdf' (and
## 'lev', required by the "unbiased" method) is given either as a
## function name or as an expression in 'x'.
discretize <- function (cdf, from, to, step = 1,
                        method = c("upper", "lower", "rounding", "unbiased"),
                        lev, by = step, xlim = NULL)
{
    method <- match.arg(method)

    ## If 'cdf' is only the name of a function (say f), build a call
    ## 'f(x)'. Otherwise, check that the expression is a function call
    ## containing an 'x'. Taken from 'curve'.
    scdf <- substitute(cdf)
    if (is.name(scdf))
    {
        fcall <- paste(scdf, "(x)")
        cdf <- parse(text = fcall)
    }
    else
    {
        if (!(is.call(scdf) && match("x", all.vars(scdf), nomatch = 0)))
            stop(sprintf("%s must be a function or an expression containing %s",
                         sQuote("cdf"), sQuote("x")))
        cdf <- scdf
    }

    ## If 'from' and/or 'to' are not specified, take their values in 'xlim'.
    if (missing(from))
        from <- xlim[1]
    if (missing(to))
        to <- xlim[2]

    if (method %in% c("upper", "lower"))
    {
        ## The "upper" discretization method assigns to point x =
        ## from, from + step, ..., to - step the probability mass F(x
        ## + step) - F(x).
        ##
        ## The "lower" discretization method assigns to point x = from
        ## the probability mass 0 and to x = from + step, ..., to the
        ## probability mass F(x) - F(x - step).
        ##
        ## Hence, the latter method simply has one more element than the
        ## former.
        x <- seq.int(from, to, by)
        Fx <- eval(cdf, envir = list(x = x), enclos = parent.frame())
        return(c(if(method == "lower") 0, diff(Fx)))
    }
    if (method == "rounding")
    {
        ## Rounding method assigns to point x = from the probability
        ## mass F(from + step/2) - F(from) and to point x = from +
        ## step, ..., to - step the probability mass F(x - step/2) -
        ## F(x + step/2).
        ##
        ## It is possible to make adjustments for the limits of the
        ## intervals (closed or open) for discrete distributions via
        ## 'cdf'.
        x <- c(from, seq.int(from + by/2, to - by/2, by))
        Fx <- eval(cdf, envir = list(x = x), enclos = parent.frame())
        return(diff(Fx))
    }
    if (method == "unbiased")
    {
        ## This is the matching of the first moment method. It
        ## requires a function to compute the first limited moment
        ## which should be provided in argument 'lev'. The latter is
        ## specified just like 'cdf'.
        if (missing(lev))
            stop(sprintf("%s required with method %s",
                         sQuote("lev"), dQuote("unbiased")))
        slev <- substitute(lev)
        if (is.name(slev))
        {
            fcall <- paste(slev, "(x)")
            lev <- parse(text = fcall)
        }
        else
        {
            if (!(is.call(slev) && match("x", all.vars(slev), nomatch = 0)))
                stop(sprintf("%s must be a function or an expression containing %s",
                             sQuote("lev"), sQuote("x")))
            lev <- slev
        }

        ## The first limited moment must be evaluated in x = from,
        ## from + step, ..., to and the cdf in x = from and x = to
        ## only (see below).
        x <- seq.int(from, to, by)
        Ex <- eval(lev, envir = list(x = x), enclos = parent.frame())
        Fx <- eval(cdf, envir = list(x = c(from, to)),
                   enclos = parent.frame())

        ## The probability mass in x = from is
        ##
        ##   (E[X ^ x] - E[X ^ x + step])/step + 1 - F(x).
        ##
        ## The probability mass in x = from + step, ..., to - step is
        ##
        ##   (2 * E[X ^ x] - E[X ^ x - step] - E[X ^ x + step])/step.
        ##
        ## The probability mass in x = to is
        ##
        ##   (E[X ^ x] - E[X ^ x - step])/step - 1 + F(x).
        ##
        ## See exercise 6.36 in Loss Models, 2nd edition.
        return(c(-diff(head(Ex, 2))/by + 1 - Fx[1],
                 (2 * head(Ex[-1], -1) - head(Ex, -2) - tail(Ex, -2))/by,
                 diff(tail(Ex, 2))/by - 1 + Fx[2]))
    }
}

discretise <- discretize
actuar/R/Pareto2.R0000644000176200001440000000266315147745722013412 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r,m,lev}pareto2 functions to compute
### characteristics of the Pareto (type) II distribution. The version
### used in these functions has cumulative distribution function
###
###   Pr[X <= x] = 1 - (1/(1 + v))^shape, x > min,
###
### where v = (x - min)/scale.
###
### See Arnold, B. C. (2015), Pareto Distributions, Second Edition,
### CRC Press.
###
### AUTHOR: Vincent Goulet

## Pareto II computations are delegated to the C workhorses registered
## as C_actuar_do_dpq and C_actuar_do_random.

## Density.
dpareto2 <- function (x, min, shape, rate = 1, scale = 1/rate, log = FALSE)
    .External(C_actuar_do_dpq, "dpareto2", x, min, shape, scale, log)

## Cumulative distribution function.
ppareto2 <- function (q, min, shape, rate = 1, scale = 1/rate,
                      lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "ppareto2", q, min, shape, scale,
              lower.tail, log.p)

## Quantile function.
qpareto2 <- function (p, min, shape, rate = 1, scale = 1/rate,
                      lower.tail = TRUE, log.p = FALSE)
    .External(C_actuar_do_dpq, "qpareto2", p, min, shape, scale,
              lower.tail, log.p)

## Random generation.
rpareto2 <- function(n, min, shape, rate = 1, scale = 1/rate)
    .External(C_actuar_do_random, "rpareto2", n, min, shape, scale)

## Raw moment of order 'order'.
mpareto2 <- function(order, min, shape, rate = 1, scale = 1/rate)
    .External(C_actuar_do_dpq, "mpareto2", order, min, shape, scale, FALSE)

## Limited expected value E[min(X, limit)^order].
levpareto2 <- function(limit, min, shape, rate = 1, scale = 1/rate,
                       order = 1)
    .External(C_actuar_do_dpq, "levpareto2", limit, min, shape, scale,
              order, FALSE)
actuar/R/Extract.grouped.data.R0000644000176200001440000001033515147745722016063 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Extraction and replacement methods for grouped data
### objects
###
### AUTHORS: Vincent Goulet,
### Mathieu Pigeon, Louis-Philippe Pouliot

## Extraction method for "grouped.data" objects.  Column 1 is special:
## it represents the group boundaries, which are stored as vector 'cj'
## in the environment of the object, so extracting it (or rows of it)
## requires rebuilding the boundary vector.
"[.grouped.data" <- function(x, i, j)
{
    ## Only columns to extract are specified (x[i] form).
    if (nargs() < 3L)
    {
        if (missing(i))
            return(x)
        if (is.matrix(i))
            return(as.matrix(x)[i])
        res <- as.data.frame(NextMethod())
        ## keep the "grouped.data" class only when the boundaries
        ## column (column 1) is among the extracted columns
        if (length(i) > 1 && 1 %in% seq(ncol(x))[i])
        {
            environment(res) <- environment(x)
            class(res) <- c("grouped.data", class(res))
        }
        return(res)
    }

    ## Convert row and column indexes to strictly positive integers.
    ii <- if (missing(i)) seq.int(nrow(x)) else seq.int(nrow(x))[i]
    ij <- if (missing(j)) integer(0) else seq.int(ncol(x))[j]

    ## Extraction of at least the group boundaries (the complicated case).
    if (!length(ij) || 1L %in% ij)
    {
        ## Extraction of group boundaries in increasing order only
        ## (intractable otherwise).
        if (is.unsorted(ii))
        {
            warning("rows extracted in increasing order")
            ii <- sort(ii)
        }

        ## Fetch the appropriate group boundaries.  Row k uses
        ## boundaries cj[k] and cj[k + 1], hence the union below.
        cj <- eval(expression(cj), envir = environment(x))
        cj <- cj[sort(unique(c(ii, ii + 1L)))]

        ## Extraction of the first column only: return the vector of group
        ## boundaries.
        if (identical(ij, 1L))
            return(cj)

        ## Return a modified 'grouped.data' object.
        res <- NextMethod()
        environment(res) <- new.env()
        assign("cj", cj, environment(res))
        return(res)
    }

    ## All other cases handled like a regular data frame.
    NextMethod()
}

## Replacement method for "grouped.data" objects.  Replacement is
## supported either in the boundaries column alone, or in frequency
## columns alone; mixing both in one call is an error.
"[<-.grouped.data" <- function(x, i, j, value)
{
    nA <- nargs()
    if (nA == 4L)                       # x[i, j] <- value form
    {
        ii <- if (missing(i)) NULL else i
        ij <- if (missing(j)) NULL else j
    }
    else if (nA == 3L)                  # x[i] <- value form
    {
        ## No arguments inside [ ]: only replacing by NULL is supported.
        if (missing(i) && missing(j))
        {
            if (is.null(value))
                return(x[logical(0)])
            stop("impossible to replace boundaries and frequencies simultaneously")
        }

        ## Indexing by a logical matrix is supported, but only two
        ## types of replacement are allowed: replacing in the
        ## first column only, or replacing in any column but the
        ## first.
        if (is.logical(i) && is.matrix(i) && all(dim(i) == dim(x)))
        {
            ij <- apply(i, 2, any)      # columns with replacements
            if (match(TRUE, ij) == 1)   # boundaries to replace
            {
                if (length(ij) > 1)     # boundaries and frequencies
                    stop("impossible to replace boundaries and frequencies simultaneously")
                ii <- i[, ij]           # boundaries only
            }
            return(NextMethod())        # frequencies only
        }
        ## Indexing by a non logical matrix is not supported.
        if (is.matrix(i))
            stop("only logical matrix subscripts are allowed in replacement")
        ## Indexing by a vector: the argument specifies columns to
        ## replace.
        ij <- i
        ii <- NULL
    }
    else
        stop("need 0, 1, or 2 subscripts")

    ## Convert row and column indexes to integers.
    ii <- if (is.null(ii)) seq.int(nrow(x)) else seq.int(nrow(x))[ii]
    ij <- if (is.null(ij)) integer(0) else seq.int(ncol(x))[ij]

    ## Replacement at least in the group boundaries column.
    if (!length(ij) || 1L %in% ij)
    {
        ## supported: replacement of group boundaries only; the object
        ## is rebuilt through grouped.data() so the environment-held
        ## 'cj' stays consistent
        if (identical(ij, 1L))
        {
            cj <- eval(expression(cj), envir = environment(x))
            cj[sort(unique(c(ii, ii + 1L)))] <- value
            res <- grouped.data(cj, x[, -1L])
            names(res) <- names(x)
            return(res)
        }
        ## not supported (intractable): replacement in the column of
        ## boundaries and any other column
        stop("impossible to replace boundaries and frequencies simultaneously")
    }

    ## All other cases handled like a regular data frame.
    NextMethod()
}
actuar/R/GeneralizedPareto.R0000644000176200001440000000326215147745722015502 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r,m,lev}genpareto functions to compute
### characteristics of the Generalized Pareto distribution. The version
### used in these functions has cumulative distribution function
###
###   Pr[X <= x] = Pr[Y <= x / (x + scale)],
###
### where Y has a Beta distribution with parameters shape2 and shape1.
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
###
### AUTHORS: Mathieu Pigeon, Vincent Goulet

## The functions below are thin R-level entry points: argument
## handling happens here (in particular the rate/scale duality, with
## scale = 1/rate), while all numerical work is performed by the C
## routines registered as C_actuar_do_dpq and C_actuar_do_random.

## Density function.
dgenpareto <- function(x, shape1, shape2, rate = 1, scale = 1/rate,
                       log = FALSE)
{
    .External(C_actuar_do_dpq, "dgenpareto", x, shape1, shape2, scale, log)
}

## Distribution function.
pgenpareto <- function(q, shape1, shape2, rate = 1, scale = 1/rate,
                       lower.tail = TRUE, log.p = FALSE)
{
    .External(C_actuar_do_dpq, "pgenpareto", q, shape1, shape2, scale,
              lower.tail, log.p)
}

## Quantile function.
qgenpareto <- function(p, shape1, shape2, rate = 1, scale = 1/rate,
                       lower.tail = TRUE, log.p = FALSE)
{
    .External(C_actuar_do_dpq, "qgenpareto", p, shape1, shape2, scale,
              lower.tail, log.p)
}

## Random number generation.
rgenpareto <- function(n, shape1, shape2, rate = 1, scale = 1/rate)
{
    .External(C_actuar_do_random, "rgenpareto", n, shape1, shape2, scale)
}

## Raw moment of order 'order'.
mgenpareto <- function(order, shape1, shape2, rate = 1, scale = 1/rate)
{
    .External(C_actuar_do_dpq, "mgenpareto", order, shape1, shape2, scale,
              FALSE)
}

## Limited expected value E[min(X, limit)^order].
levgenpareto <- function(limit, shape1, shape2, rate = 1, scale = 1/rate,
                         order = 1)
{
    .External(C_actuar_do_dpq, "levgenpareto", limit, shape1, shape2, scale,
              order, FALSE)
}
actuar/R/InverseExponential.R0000644000176200001440000000242415147745722015717 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions
###
### Definition of the {d,p,q,r,m,lev}invexp functions to compute
### characteristics of the Inverse Exponential distribution. The
### version used in these functions has cumulative distribution
### function
###
###   Pr[X <= x] = exp(-scale/x), x > 0.
###
### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley.
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dinvexp <- function (x, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dinvexp", x, scale, log) pinvexp <- function(q, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pinvexp", q, scale, lower.tail, log.p) qinvexp <- function(p, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qinvexp", p, scale, lower.tail, log.p) rinvexp <- function(n, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rinvexp", n, scale) minvexp <- function(order, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "minvexp", order, scale, FALSE) levinvexp <- function(limit, rate = 1, scale = 1/rate, order) .External(C_actuar_do_dpq, "levinvexp", limit, scale, order, FALSE) actuar/R/severity.R0000644000176200001440000000120015147745722013736 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Display all values of a matrix of vectors by 'unrolling' the ### object vertically or horizontally. ### ### AUTHORS: Louis-Philippe Pouliot, ### Vincent Goulet ### New generic severity <- function(x, ...) UseMethod("severity") ### Default method. Currently identical to 'unroll' by lack of a ### better alternative. This default method is never called in the ### package. severity.default <- function(x, bycol = FALSE, drop = TRUE, ...) { chkDots(...) # method does not use '...' unroll(x, bycol, drop) } actuar/R/bstraub.R0000644000176200001440000001173615147745722013545 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Buhlmann-Straub credibility model calculations. ### ### Computation of the between variance estimators has been moved to ### external functions bvar.unbiased() and bvar.iterative() to share ### with hache(). 
### ### AUTHORS: Vincent Goulet , ### Sebastien Auclair, Louis-Philippe Pouliot bstraub <- function(ratios, weights, method = c("unbiased", "iterative"), tol = sqrt(.Machine$double.eps), maxit = 100, echo = FALSE) { ## If weights are not specified, use equal weights as in ## Buhlmann's model. if (missing(weights)) { if (any(is.na(ratios))) stop("missing ratios not allowed when weights are not supplied") weights <- array(1, dim(ratios)) } ## Check other bad arguments. if (ncol(ratios) < 2L) stop("there must be at least one node with more than one period of experience") if (nrow(ratios) < 2L) stop("there must be more than one node") if (!identical(which(is.na(ratios)), which(is.na(weights)))) stop(sprintf("missing values are not in the same positions in %s and in %s", sQuote("weights"), sQuote("ratios"))) if (all(!weights, na.rm = TRUE)) stop("no available data to fit model") ## Individual weighted averages. It could happen that a contract ## has no observations, for example when applying the model on ## claim amounts. In such a situation, we will put the total ## weight of the contract and the weighted average both equal to ## zero. That way, the premium will be equal to the credibility ## weighted average, as it should, but the contract will have no ## contribution in the calculations. weights.s <- rowSums(weights, na.rm = TRUE) ratios.w <- ifelse(weights.s > 0, rowSums(weights * ratios, na.rm = TRUE)/weights.s, 0) ## Size of the portfolio. ncontracts <- sum(weights.s > 0) ntotal <- sum(!is.na(weights)) ## Collective weighted average. weights.ss <- sum(weights.s) ## Estimation of s^2 s2 <- sum(weights * (ratios - ratios.w)^2, na.rm = TRUE)/(ntotal - ncontracts) ## First estimation of a. Always compute the unbiased estimator. a <- bvar.unbiased(ratios.w, weights.s, s2, ncontracts) ## Iterative estimation of a. Compute only if ## 1. asked to in argument; ## 2. weights are not all equal (Buhlmann model). ## 3. 
the unbiased estimator is > 0; method <- match.arg(method) if (method == "iterative" && diff(range(weights, na.rm = TRUE)) > .Machine$double.eps^0.5) { a <- if (a > 0) bvar.iterative(ratios.w, weights.s, s2, ncontracts, start = a, tol = tol, maxit = maxit, echo = echo) else 0 } ## Final credibility factors and estimator of the collective mean. if (a > 0) { cred <- 1/(1 + s2/(weights.s * a)) ratios.zw <- drop(crossprod(cred, ratios.w))/sum(cred) } else { cred <- numeric(length(weights.s)) ratios.zw <- drop(crossprod(weights.s, ratios.w))/sum(weights.s) } structure(list(means = list(ratios.zw, ratios.w), weights = list(if (a > 0) sum(cred) else weights.ss, weights.s), unbiased = if (method == "unbiased") c(a, s2), iterative = if (method == "iterative") c(a, s2), cred = cred, nodes = list(nrow(weights))), class = "bstraub", model = "Buhlmann-Straub") } predict.bstraub <- function(object, levels = NULL, newdata, ...) structure(object$means[[1L]] + object$cred * (object$means[[2L]] - object$means[[1L]]), ...) ## Alias for the linear Bayes case predict.bayes <- predict.bstraub bvar.unbiased <- function(x, w, within, n) { w.s <- sum(w) x.w <- drop(crossprod(w, x))/w.s w.s * (drop(crossprod(w, (x - x.w)^2)) - (n - 1) * within)/(w.s^2 - sum(w^2)) } ### codetools does not like the way 'a1' is defined in function ### 'bvar.iterative' below. Avoid false positive in R CMD check. 
if (getRversion() >= "2.15.1") utils::globalVariables(c("a1")) bvar.iterative <- function(x, w, within, n, start, tol = sqrt(.Machine$double.eps), maxit = 100, echo = FALSE) { if (echo) { cat("Iteration\tBetween variance estimator\n") expr <- expression(cat(" ", count, "\t\t ", a1 <- a, fill = TRUE)) } else expr <- expression(a1 <- a) a <- start count <- 0L repeat { eval(expr) if (maxit < (count <- count + 1L)) { warning("maximum number of iterations reached before obtaining convergence") break } cred <- 1/(1 + within/(w * a)) x.z <- drop(crossprod(cred, x))/sum(cred) a <- drop(crossprod(cred, (x - x.z)^2))/(n - 1) if (abs((a - a1)/a1) < tol) break } a } actuar/R/ZeroTruncatedBinomial.R0000644000176200001440000000200615147745722016335 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}ztbinom functions to compute ### characteristics of the Zero Truncated Binomial distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Second ### Edition, Wiley, 2004. 
### ### AUTHOR: Vincent Goulet dztbinom <- function (x, size, prob, log = FALSE) .External(C_actuar_do_dpq, "dztbinom", x, size, prob, log) pztbinom <- function(q, size, prob, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pztbinom", q, size, prob, lower.tail, log.p) qztbinom <- function(p, size, prob, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qztbinom", p, size, prob, lower.tail, log.p) rztbinom <- function(n, size, prob) .External(C_actuar_do_random, "rztbinom", n, size, prob) ## not exported; for internal use in panjer() pgfztbinom <- function(x, size, prob) { qn <- (1 - prob)^size (exp(size * log1p(prob * (x - 1))) - qn)/(1 - qn) } actuar/R/UniformSupp.R0000644000176200001440000000136115147745722014363 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {m,lev,mgf}unif functions to compute raw and ### limited moments, and the moment generating function for the ### Uniform distribution (as defined in R). ### ### ### ### AUTHORS: Christophe Dutang, Vincent Goulet munif <- function(order, min = 0, max = 1) .External(C_actuar_do_dpq, "munif", order, min, max, FALSE) levunif <- function(limit, min = 0, max =1, order = 1) .External(C_actuar_do_dpq, "levunif", limit, min, max, order, FALSE) mgfunif <- function(t, min = 0, max = 1, log = FALSE) .External(C_actuar_do_dpq, "mgfunif", t, min, max, log) actuar/R/Loggamma.R0000644000176200001440000000231315147745722013616 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}lgamma functions to compute ### characteristics of the Loggamma distribution. The version used in ### these functions has cumulative distribution function ### ### Pr[X <= x] = pgamma(log(x), shape = shapelog, rate = ratelog). 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dlgamma <- function(x, shapelog, ratelog, log = FALSE) .External(C_actuar_do_dpq, "dlgamma", x, shapelog, ratelog, log) plgamma <- function(q, shapelog, ratelog, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "plgamma", q, shapelog, ratelog, lower.tail, log.p) qlgamma <- function(p, shapelog, ratelog, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qlgamma", p, shapelog, ratelog, lower.tail, log.p) rlgamma <- function(n, shapelog, ratelog) .External(C_actuar_do_random, "rlgamma", n, shapelog, ratelog) mlgamma <- function(order, shapelog, ratelog) .External(C_actuar_do_dpq, "mlgamma", order, shapelog, ratelog, FALSE) levlgamma <- function(limit, shapelog, ratelog, order = 1) .External(C_actuar_do_dpq, "levlgamma", limit, shapelog, ratelog, order, FALSE) actuar/R/TransformedBeta.R0000644000176200001440000000356215147745722015161 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r,m,lev}trbeta functions to compute ### characteristics of the Transformed Beta distribution. The version ### used in these functions has cumulative distribution function ### ### Pr[X <= x] = Pr[Y <= (x/scale)^shape2 / (1 + (x/scale)^shape2)], ### ### where Y has a Beta distribution with parameters shape3 and shape1. ### ### See Appendix A of Klugman, Panjer & Willmot, Loss Models, Wiley. 
### ### AUTHORS: Mathieu Pigeon, Vincent Goulet dtrbeta <- function (x, shape1, shape2, shape3, rate = 1, scale = 1/rate, log = FALSE) .External(C_actuar_do_dpq, "dtrbeta", x, shape1, shape2, shape3, scale, log) ptrbeta <- function (q, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "ptrbeta", q, shape1, shape2, shape3, scale, lower.tail, log.p) qtrbeta <- function (p, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qtrbeta", p, shape1, shape2, shape3, scale, lower.tail, log.p) rtrbeta <- function (n, shape1, shape2, shape3, rate = 1, scale = 1/rate) .External(C_actuar_do_random, "rtrbeta", n, shape1, shape2, shape3, scale) mtrbeta <- function (order, shape1, shape2, shape3, rate = 1, scale = 1/rate) .External(C_actuar_do_dpq, "mtrbeta", order, shape1, shape2, shape3, scale, FALSE) levtrbeta <- function (limit, shape1, shape2, shape3, rate = 1, scale = 1/rate, order = 1) .External(C_actuar_do_dpq, "levtrbeta", limit, shape1, shape2, shape3, scale, order, FALSE) ## Aliases dpearson6 <- dtrbeta ppearson6 <- ptrbeta qpearson6 <- qtrbeta rpearson6 <- rtrbeta mpearson6 <- mtrbeta levpearson6 <- levtrbeta actuar/R/ZeroTruncatedNegativeBinomial.R0000644000176200001440000000201315147745722020016 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,q,r}ztnbinom functions to compute ### characteristics of the Zero Truncated Negative Binomial ### distribution. ### ### See Appendix B of Klugman, Panjer & Willmot, Loss Models, Second ### Edition, Wiley, 2004. 
### ### AUTHOR: Vincent Goulet dztnbinom <- function (x, size, prob, log = FALSE) .External(C_actuar_do_dpq, "dztnbinom", x, size, prob, log) pztnbinom <- function(q, size, prob, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pztnbinom", q, size, prob, lower.tail, log.p) qztnbinom <- function(p, size, prob, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qztnbinom", p, size, prob, lower.tail, log.p) rztnbinom <- function(n, size, prob) .External(C_actuar_do_random, "rztnbinom", n, size, prob) ## not exported; for internal use in panjer() pgfztnbinom <- function(x, size, prob) expm1(-size * log1p(x * (prob - 1)))/expm1(-size * log(prob)) actuar/R/ZeroModifiedLogarithmic.R0000644000176200001440000000167115147745722016643 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Definition of the {d,p,r}zmlogarithmic functions to compute ### characteristics of the zero modified logarithmic distribution. See ### ./Logarithmic.R for details on the parametrization. ### ### See p. 93 of Klugman, Panjer & Willmot, Loss Models, Fourth ### Edition, Wiley, 2012. 
### ### AUTHOR: Vincent Goulet dzmlogarithmic <- function(x, prob, p0, log = FALSE) .External(C_actuar_do_dpq, "dzmlogarithmic", x, prob, p0, log) pzmlogarithmic <- function(q, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "pzmlogarithmic", q, prob, p0, lower.tail, log.p) qzmlogarithmic <- function(p, prob, p0, lower.tail = TRUE, log.p = FALSE) .External(C_actuar_do_dpq, "qzmlogarithmic", p, prob, p0, lower.tail, log.p) rzmlogarithmic <- function(n, prob, p0) .External(C_actuar_do_random, "rzmlogarithmic", n, prob, p0) actuar/demo/0000755000176200001440000000000015147745722012473 5ustar liggesusersactuar/demo/risk.R0000644000176200001440000003024615147745722013573 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Demo of the risk theory facilities provided by actuar ### ### AUTHORS: Christophe Dutang, Vincent Goulet require(actuar) require(graphics) ### DISCRETIZATION OF CONTINUOUS DISTRIBUTIONS ## Upper and lower discretization of a Gamma(2, 1) distribution with a ## step (or span, or lag) of 0.5. The value of 'to' is chosen so as to ## cover most of the distribution. x <- seq(0, qgamma(1 - 1E-6, 2, 1), by = 0.5) xu <- tail(x, 1) fu <- discretize(pgamma(x, 2, 1), method = "upper", from = 0, to = xu, step = 0.5) fl <- discretize(pgamma(x, 2, 1), method = "lower", from = 0, to = xu, step = 0.5) curve(pgamma(x, 2, 1), xlim = range(x), lwd = 2) par(col = "blue") plot(stepfun(head(x, -1), diffinv(fu)), pch = 19, add = TRUE) par(col = "green") plot(stepfun(x, diffinv(fl)), pch = 19, add = TRUE) par(col = "black") ## Discretization with the rounding method, which has the true cdf ## pass through the midpoints of the intervals [x - step/2, x + ## step/2). 
fr <- discretize(pgamma(x, 2, 1), method = "rounding", from = 0, to = xu, step = 0.5) curve(pgamma(x, 2, 1), xlim = range(x), lwd = 2) par(col = "blue") plot(stepfun(head(x, -1), diffinv(fr)), pch = 19, add = TRUE) par(col = "black") ## Local matching of the first moment. This requires a function to ## compute the limited expected value of the true distribution in any ## point. fb <- discretize(pgamma(x, 2, 1), method = "unbiased", lev = levgamma(x, 2, 1), from = 0, to = xu, step = 0.5) curve(pgamma(x, 2, 1), xlim = range(x), lwd = 2) par(col = "blue") plot(stepfun(x, diffinv(fb)), pch = 19, add = TRUE) par(col = "black") all.equal(diff(pgamma(range(x), 2, 1)), sum(fb)) # same total probability all.equal(levgamma(xu, 2, 1) - xu * pgamma(xu, 2, 1, lower.tail = FALSE), drop(crossprod(x, fb))) # same expected value ## Comparison of all four methods fu <- discretize(plnorm(x), method = "upper", from = 0, to = 5) fl <- discretize(plnorm(x), method = "lower", from = 0, to = 5) fr <- discretize(plnorm(x), method = "rounding", from = 0, to = 5) fb <- discretize(plnorm(x), method = "unbiased", from = 0, to = 5, lev = levlnorm(x)) curve(plnorm(x), from = 0, to = 5, lwd = 2) par(col = "blue") plot(stepfun(0:4, diffinv(fu)), pch = 19, add = TRUE) par(col = "red") plot(stepfun(0:5, diffinv(fl)), pch = 19, add = TRUE) par(col = "green") plot(stepfun(0:4, diffinv(fr)), pch = 19, add = TRUE) par(col = "magenta") plot(stepfun(0:5, diffinv(fb)), pch = 19, add = TRUE) legend("bottomright", legend = c("upper", "lower", "rounding", "unbiased"), col = c("blue", "red", "green", "magenta"), lty = 1, pch = 19, text.col = "black") par(col = "black") ### CALCULATION OF THE AGGREGATE CLAIM AMOUNT DISTRIBUTION ## Calculation of the aggregate claim amount distribution using the ## recursive method (Panjer). Argument 'x.scale' is used to specify ## how much a value of 1 is really worth. 
fx.b <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "unbiased", lev = levgamma(x, 2, 1)) Fs.b <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.b, lambda = 10, x.scale = 0.5) summary(Fs.b) # summary method knots(Fs.b) # support of Fs.b (knots) Fs.b(knots(Fs.b)) # evaluation at knots plot(Fs.b, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) # graphic mean(Fs.b) # empirical mean quantile(Fs.b) # quantiles ## Convolutions (exact calculation). Requires a vector of ## probabilities for the frequency model. This method can quickly ## become impractical for a large expected number of claims. pn <- dpois(0:qpois(1-1E-6, 10), 10) Fs <- aggregateDist("convolution", model.freq = pn, model.sev = fx.b, x.scale = 0.5) summary(Fs) # summary method knots(Fs) # support of Fs (knots) Fs(knots(Fs)) # evaluation at knots plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) # graphic mean(Fs) # empirical mean quantile(Fs) # quantiles ## Normal approximation. Not hugely useful, but simple to implement... Fs.n <- aggregateDist("normal", moments = c(20, 60)) summary(Fs.n) # summary method plot(Fs.n, xlim = c(0, 60)) # graphic mean(Fs.n) # true mean quantile(Fs.n) # normal quantiles ## Normal Power II approximation. The approximation is valid for ## values above the expected value only. Fs.np <- aggregateDist("npower", moments = c(20, 60, 0.516398)) summary(Fs.np) # summary method plot(Fs.np, xlim = c(0, 60)) # truncated graphic ## Simulation method. Function 'simul' is used to simulate the data ## (see the 'simulation' demo for examples). Fs.s <- aggregateDist("simulation", model.freq = expression(y = rpois(10)), model.sev = expression(y = rgamma(2, 1)), nb.simul = 10000) summary(Fs.s) # summary method plot(Fs.s, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) # graphic mean(Fs.s) # empirical mean quantile(Fs.s) # quantiles ## Graphic comparing the cdfs obtained by a few methods. 
fx.u <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "upper") Fs.u <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.u, lambda = 10, x.scale = 0.5) fx.l <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "lower") Fs.l <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.l, lambda = 10, x.scale = 0.5) par(col = "black") plot(Fs.b, do.points = FALSE, verticals = TRUE, xlim = c(0, 60), sub = "") par(col = "blue") plot(Fs.u, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "red") plot(Fs.l, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "green") plot(Fs.s, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "magenta") plot(Fs.n, add = TRUE, sub = "") legend("bottomright", legend = c("recursive + unbiased", "recursive + upper", "recursive + lower", "simulation", "normal approximation"), col = c("black", "blue", "red", "green", "magenta"), lty = 1, text.col = "black", cex = 1.2) par(col = "black") ## Table of quantiles for the same methods as graphic above. x <- knots(Fs.l) m <- which.min(x[round(Fs.l(x), 6) > 0]) M <- which.max(x[round(Fs.u(x), 6) < 1]) x <- x[round(seq.int(from = m, to = M, length = 30))] round(cbind(x = x, Lower = Fs.l(x), Unbiased = Fs.b(x), Upper = Fs.u(x), Simulation = Fs.s(x), Normal = Fs.n(x)), 6) ### CALCULATION OF THE ADJUSTMENT COEFFICIENT ## No reinsurance, generalized Erlang claim amounts, inverse gamma ## interarrival times and independence. The adjustment coefficient is ## increasing with the safety loading. mgf <- function(x) 1/(1 - x) * 2/(2 - x) * 3/(3 - x) adjCoef(mgf, mgfinvgamma(x, 2, 6/11), 1.1, 1) adjCoef(mgf, mgfinvgamma(x, 2, 6/11), 1.2, 1) adjCoef(mgf, mgfinvgamma(x, 2, 6/11), 1.3, 1) ## More sophisticated example: comparison of the effect of dependence ## on the adjustment coefficient in the case of proportional ## reinsurance. Use a Clayton copula with exponential marginals. 
rclayton <- function(alpha, n) { val <- cbind(runif(n), runif(n)) val[, 2] <- (val[, 1]^(-alpha) * (val[, 2]^(-alpha/(alpha + 1)) - 1) + 1)^(-1/alpha) val } u <- rclayton(2, 1000) # variates with positive dependence x <- qexp(u[, 1]) # claim amounts w <- qexp(u[, 2]) # interarrival times ## Premium rate and Lundberg's functions of the retention rate. We ## assume a safety loading of 20% for the insurer and 30% for the ## reinsurer and premium calculated with the expected value principle. p <- function(a) mean(x)/mean(w) * (1.2 - 1.3 + 1.3 * a) h <- function(r, a) mean(exp(r * (a * x - p(a) * w))) R1 <- adjCoef(h = h, upper = 1, reinsurance = "prop", from = 1/3, to = 1) plot(R1) ## Repeat the above with independent claim amounts and interarrival ## times. u <- rclayton(1, 1000) # independent variates x <- qexp(u[,1]) # claim amounts w <- qexp(u[,2]) # interarrival times R2 <- adjCoef(h = h, upper = 1, reinsurance = "prop", from = 1/3, to = 1) plot(R2, add = TRUE, col = "green") legend("bottomright", legend = c("dependence", "independence"), col = c("black", "green"), lty = 1) ## Similar example with excess-of-loss reinsurance. ## positive dependence u <- rclayton(2, 1000) # variates with positive dependence x <- qexp(u[,1]) # claim amounts w <- qexp(u[,2]) # interarrival times p <- function(L) mean(x)/mean(w) * (1.2 - 1.3) + 1.3 * mean(pmin(L, x))/mean(w) h <- function(r, L) mean(exp(r * (pmin(L, x) - p(L) * w))) R3 <- adjCoef(h = h, upper = 1, reinsurance = "prop", from = 0, to = 10) plot(R3) u <- rclayton(1, 1000) # independent variates x <- qexp(u[,1]) # claim amounts w <- qexp(u[,2]) # interarrival times R4 <- adjCoef(h = h, upper = 1, reinsurance = "prop", from = 0, to = 10) plot(R4, add = TRUE, col = "green") legend("bottomright", legend = c("dependence", "independence"), col = c("black", "green"), lty = 1) ### CALCULATION OF RUIN PROBABILITIES ## Case with an explicit formula: exponential claims and interarrival ## times. 
Safety loading is always 20% and premiums are always ## calculated according to the expected value principle. psi <- ruin(claims = "exponential", par.claims = list(rate = 1), wait = "exponential", par.wait = list(rate = 1), premium = 1.2) psi(0:10) plot(psi, from = 0, to = 10) ## Exponential claims and hyper-exponential interarrival times. psi <- ruin(claims = "exponential", par.claims = list(rate = 2), wait = "exponential", par.wait = list(rate = c(2, 3, 1)/2, w = c(2, 3, 1)/6), premium = 1.2) psi(0:10) ## Hyper-exponential claims and interarrival times. psi <- ruin(claims = "exponential", par.claims = list(rate = c(2, 3, 1)/2, w = c(2, 3, 1)/6), wait = "exponential", par.wait = list(rate = c(2, 3, 1)/4, w = c(2, 3, 1)/6), premium = 0.6) psi(0:10) ## Exponential claims and Erlang interarrival times psi <- ruin(claims = "exponential", par.claims = list(rate = 2), wait = "Erlang", par.wait = list(shape = 2, rate = 1), premium = 1.2) psi(0:10) ## Erlang claims and interarrival times psi <- ruin(claims = "Erlang", par.claims = list(shape = 2, rate = 2), wait = "Erlang", par.wait = list(shape = 2, rate = 1), premium = 0.6) psi(0:10) ## Mixture of Erlang for claims and Erlang interarrival times psi <- ruin(claims = "Erlang", par.claims = list(shape = c(2, 4), rate = c(1, 3), w = c(1, 2)/3), wait = "Erlang", par.wait = list(shape = 2, rate = 1), premium = 1.2) psi(0:10) ## Generalized Erlang claims and mixture of two generalized Erlang ## interarrival times. These must be given as phase-type distributions ## to 'ruin'. 
prob.c <- c(1, 0, 2, 0)/3 rate.c <- cbind(c(-1, 0, 0, 0), c(1, -3, 0, 0), c(0, 0, -2, 0), c(0, 0, 2, -3)) mean.c <- mphtype(1, prob.c, rate.c) prob.w <- c(1, 0, 0) rate.w <- cbind(c(-1, 0, 0), c(1, -2, 0), c(0, 2, -3)) mean.w <- mphtype(1, prob.w, rate.w) psi <- ruin(claims = "phase-type", par.claims = list(prob = prob.c, rate = rate.c), wait = "phase-type", par.wait = list(prob = prob.w, rate = rate.w), premium = 1.2 * mean.c/mean.w) psi(0:10) par(op) actuar/demo/lossdist.R0000644000176200001440000002447715147745722014500 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Demo of the loss distributions facilities provided by actuar ### ### AUTHOR: Vincent Goulet require(actuar) require(graphics) ### A utility function to create graphs for probability laws showgraphs <- function(fun, par, what = c("d", "p", "m", "lev"), xlim) { dist <- switch(fun, trbeta = "TRANSFORMED BETA DISTRIBUTION", genpareto = "GENERALIZED PARETO DISTRIBUTION", burr = "BURR DISTRIBUTION", invburr = "INVERSE BURR DISTRIBUTION", pareto = "PARETO DISTRIBUTION", invpareto = "INVERSE PARETO DISTRIBUTION", llogis = "LOGLOGISTIC DISTRIBUTION", paralogis = "PARALOGISTIC DISTRIBUTION", invparalogis = "INVERSE PARALOGISTIC DISTRIBUTION", trgamma = "TRANSFORMED GAMMA DISTRIBUTION", invtrgamma = "INVERSE TRANSFORMED GAMMA DISTRIBUTION", invgamma = "INVERSE GAMMA DISTRIBUTION", weibull = "WEIBULL DISTRIBUTION", invweibull = "INVERSE WEIBULL DISTRIBUTION", invexp = "INVERSE EXPONENTIAL DISTRIBUTION", pareto1 = "SINGLE PARAMETER PARETO DISTRIBUTION", lgamma = "LOGGAMMA DISTRIBUTION", genbeta = "GENERALIZED BETA DISTRIBUTION", phtype = "PHASE-TYPE DISTRIBUTION", gamma = "GAMMA DISTRIBUTION", exp = "EXPONENTIAL DISTRIBUTION", chisq = "CHI-SQUARE DISTRIBUTION", lnorm = "LOGNORMAL DISTRIBUTION", invgauss = "INVERSE GAUSSIAN DISTRIBUTION", norm = "NORMAL DISTRIBUTION", beta = "BETA DISTRIBUTION", unif = "UNIFORM DISTRIBUTION") if (missing(xlim)) { qf <- match.fun(paste("q", 
fun, sep = "")) formals(qf)[names(par)] <- par xlim <- c(0, qf(0.999)) } k <- seq.int(4) limit <- seq(0, xlim[2], len = 10) mfrow = c(ceiling(length(what) / 2), 2) op <- par(mfrow = mfrow, oma = c(0, 0, 2, 0)) for (t in what) { f <- match.fun(paste(t, fun, sep = "")) formals(f)[names(par)] <- par main <- switch(t, "d" = "Probability Density Function", "p" = "Cumulative Distribution Function", "m" = "Raw Moments", "lev" = "Limited Expected Value Function", "mgf" = "Moment Generating Function") if (t == "m") plot(k, f(k), type = "l", col = 4, lwd = 2, main = main) else if (t == "lev") plot(limit, f(limit), type = "l", col = 4, lwd = 2, main = main) else if (t == "mgf") curve(f(x), xlim = c(0, 2), col = 4, lwd = 2, main = main) else curve(f(x), xlim = xlim, col = 4, lwd = 2, main = main) title(main = dist, outer = TRUE) } par(op) } ### ### DATA SETS ### ## The package includes the individual dental claims and grouped ## dental claims data sets often referred to in Klugman, Panjer & ## Willmot (1998, 2004) data(dental); dental data(gdental); gdental ### ### PROBABILITY LAWS ### ## Illustration of the new probability laws functions provided by the ## package. 
## TRANSFORMED BETA FAMILY ## Transformed beta distribution showgraphs("trbeta", list(shape1 = 3, shape2 = 4, shape3 = 5, scale = 10)) ## Generalized Pareto distribution showgraphs("genpareto", list(shape1 = 10, shape2 = 4, scale = 10)) ## Burr distribution showgraphs("burr", list(shape1 = 3, shape2 = 4, scale = 10)) ## Inverse Burr distribution showgraphs("invburr", list(shape1 = 3, shape2 = 6, scale = 10)) ## Pareto distribution showgraphs("pareto", list(shape = 10, scale = 10)) ## Inverse Pareto distribution showgraphs("invpareto", list(shape = 4, scale = 1), what = c("d", "p")) ## Loglogistic distribution showgraphs("llogis", list(shape = 6, scale = 10)) ## Paralogistic distribution showgraphs("paralogis", list(shape = 3, scale = 10)) ## Inverse paralogistic distribution showgraphs("invparalogis", list(shape = 6, scale = 10)) ## TRANSFORMED GAMMA FAMILY ## Transformed gamma distribution showgraphs("trgamma", list(shape1 = 3, shape2 = 1, scale = 10)) ## Inverse transformed gamma distribution showgraphs("invtrgamma", list(shape1 = 3, shape2 = 2, scale = 10)) ## Inverse gamma distribution showgraphs("invgamma", list(shape = 6, scale = 10)) ## Weibull distribution ('mweibull' and 'levweibull') showgraphs("weibull", list(shape = 1.5, scale = 10)) ## Inverse Weibull distribution showgraphs("invweibull", list(shape = 6, scale = 10)) ## Inverse exponential distribution showgraphs("invexp", list(rate = 1), what = c("d", "p")) ## OTHER DISTRIBUTIONS ## Single parameter Pareto distribution showgraphs("pareto1", list(shape = 5, min = 10), xlim = c(0, 50)) ## Loggamma distribution showgraphs("lgamma", list(shapelog = 2, ratelog = 5)) ## Generalized beta distribution showgraphs("genbeta", list(shape1 = 1, shape2 = 2, shape3 = 3, scale = 2)) ## Phase-type distribution showgraphs("phtype", list(prob = c(0.5614, 0.4386), rates = matrix(c(-8.64, 0.101, 1.997, -1.095), 2, 2)), what = c("d", "p", "m", "mgf"), xlim = c(0.001, 5)) ## DISTRIBUTIONS ALREADY IN R ## Gamma distribution 
showgraphs("gamma", list(shape = 3, rate = 5), what = c("m", "lev", "mgf")) ## Chi-square distribution showgraphs("chisq", list(df = 3), what = c("m", "lev", "mgf")) ## Exponential distribution showgraphs("exp", list(rate = 5), what = c("m", "lev", "mgf")) ## Lognormal distribution showgraphs("lnorm", list(meanlog = 1, sdlog = 1), what = c("m", "lev")) ## Inverse gaussian distribution (from package SuppDists) showgraphs("invgauss", list(nu = 1, lambda = 10), what = c("m", "lev", "mgf"), xlim = c(0, 10)) ## Normal distribution showgraphs("norm", list(mean = 0, sd = 1), what = c("m", "mgf")) ## Beta distribution showgraphs("beta", list(shape1 = 1, shape2 = 2), what = c("m", "lev")) ## Uniform distribution showgraphs("unif", list(min = 0, max = 1), what = c("m", "lev", "mgf")) ### ### GROUPED DATA MANIPULATION ### ## Creation of grouped data objects x <- grouped.data(groups = c(0, 25, 50, 100, 150, 250, 500), line1 = c(30, 31, 57, 42, 65, 84), line2 = c(26, 33, 31, 19, 16, 11)) x ## Extraction and replacement: only "[" and "[<-" are officially ## supported. x[, 1] # group boundaries x[1] # notice the difference x[, -1] # group frequencies x[1:3,] # first 3 groups x[1, 2] <- 22; x # frequency replacement x[1, 1] <- c(0, 20); x # boundary replacement ## Mean, variance and standard deviation for grouped data objects. mean(x) var(x) sd(x) ## In the sequel, only the first frequencies column is considered. x <- x[, -3] ## Function 'hist' handles individual data only. We provide a method ## for grouped data. hist(x) ## Function 'ogive' returns a function to compute the ogive of grouped ## data in any point, much like 'ecdf' does for individual data. ## Methods also exist to extract the group boundaries ('knots') and ## to plot the ogive. 
Fnt <- ogive(x) summary(Fnt) knots(Fnt) # group boundaries Fnt(knots(Fnt)) # ogive at group boundaries plot(Fnt) # plot of the ogive ## The method of 'quantile' for grouped data objects computes linearly ## smoothed quantiles, that is the inverse of the ogive in various ## points. quantile(x) Fnt(quantile(x)) ## The method of 'summary' for grouped data objects returns the ## quantiles and the mean in a single object. summary(x) ### ### EMPIRICAL MOMENTS CALCULATION ### ## Function 'emm' computes the k-th empirical moment of a sample, ## whether it is individual or grouped data. emm(dental) # == mean(dental) emm(gdental) # == mean(gdental) emm(dental, order = 1:3) # first three moments emm(gdental, order = 1:3) # idem ## Function 'elev' is similar to 'ecdf' and 'ogive' in that it returns ## a function to compute the empirical limited expected value (first ## limited moment) for any limit. There are methods for individual and ## grouped data. lev <- elev(dental) lev(knots(lev)) # ELEV at data points plot(lev, type = "o", pch = 19) # plot of the ELEV function lev <- elev(gdental) lev(knots(lev)) # ELEV at data points plot(lev, type = "o", pch = 19) # plot of the ELEV function ### ### MINIMUM DISTANCE ESTIMATION ### ## Maximum likelihood estimation (for individual data) is well covered ## by 'fitdistr' in package MASS. We provide function 'mde' to fit ## models using three distance minimization techniques: Cramer-von ## Mises (for individual and grouped data), chi-square and layer ## average severity (both grouped data only). Usage (and inner ## working) is very similar to 'fitdistr'. 
mde(dental, pexp, start = list(rate = 1/200), measure = "CvM") mde(gdental, pexp, start = list(rate = 1/200), measure = "CvM") mde(gdental, pexp, start = list(rate = 1/200), measure = "chi-square") mde(gdental, levexp, start = list(rate = 1/200), measure = "LAS") ### ### COVERAGE MODIFICATIONS ### ## Function 'coverage' is useful to obtain the probability density ## function (pdf) or cumulative distribution function (cdf) of a loss ## random variable under coverage modifications. f <- coverage(dgamma, pgamma, deductible = 1, limit = 7) curve(dgamma(x, 3), xlim = c(0, 10), ylim = c(0, 0.3)) # original curve(f(x, 3), xlim = c(0.01, 5.99), col = 4, add = TRUE) # modified x <- rgamma(1000, 3, 1) # sample of claim amounts x <- pmin(x, 7)[x > 1] - 1 # deductible and limit library(MASS) # for ML estimation m <- mean(x) # empirical mean v <- var(x) # empirical variance (p <- fitdistr(x, f, start = list(shape = m^2/v, rate = m/v))$estimate ) # MLE hist(x + 1, breaks = 0:10, prob = TRUE) # histogram of observed data curve(dgamma(x, p[1], p[2]), add = TRUE) # fit of underlying distribution par(op) actuar/demo/00Index0000644000176200001440000000023115147745722013621 0ustar liggesuserscredibility credibility theory lossdist loss distributions modeling risk risk and ruin theory simulation simulation of compound hierarchical models actuar/demo/simulation.R0000644000176200001440000000605315147745722015006 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Demo of the portfolio simulation facilities provided by actuar ### ### AUTHOR: Vincent Goulet require(actuar) ## A simple Compound Poisson model: S_t = C_1 + ... + C_{N_t}, with ## N_t ~ Poisson(10), C ~ Lognormal(log(1500) - 1, 1). The names of ## the components serve no purpose here but are required. 
pf <- rcomphierarc(list(y = 10), model.freq = expression(y = rpois(10)), model.sev = expression(y = rlnorm(log(1500) - 1, 1))) pf # print method aggregate(pf) # aggregate claim amounts frequency(pf) # frequencies severity(pf) # individual claim amounts severity(pf, splitcol = 10) # last period separate ## Simple (continuous) mixture of models: S_t|Theta ~ Poisson(Theta), ## Theta ~ Gamma(2, 1). Any names can be used in the model. pf <- rcomphierarc(list(Theta = 1, S = 10), model.freq = expression(Theta = rgamma(2, 1), S = rpois(Theta))) aggregate(pf) # actual data frequency(pf) # same, here ## Model with with mixtures for both frequency and severity. pf <- rcomphierarc(list(entity = 10, year = 5), model.freq = expression(entity = rgamma(2, 1), year = rpois(entity)), model.sev = expression(entity = rnorm(5, 1), year = rlnorm(entity, 1))) pf aggregate(pf) frequency(pf) ## Same model as above, but with weights incorporated into the model. ## The string "weights" should appear in the model specification ## wherever weights are to be used. wit <- runif(10, 2, 10) (wit <- runif(50, rep(0.5 * wit, each = 5), rep(1.5 * wit, each = 5))) (pf <- rcomphierarc(list(entity = 10, year = 5), model.freq = expression(entity = rgamma(2, 1), year = rpois(weights * entity)), model.sev = expression(entity = rnorm(5, 1), year = rlnorm(entity, 1)), weights = wit)) weights(pf) # extraction of weights ## Three level hierarchical model (sector, unit, contract). Claim ## severity varies only by sector and unit. The number of "nodes" at ## each level is different. 
nodes <- list(sector = 2, unit = c(3, 4), contract = c(10, 5, 8, 5, 7, 11, 4), year = 6) mf <- expression(sector = rexp(2), unit = rgamma(sector, 0.1), contract = rgamma(unit, 1), year = rpois(weights * contract)) ms <- expression(sector = rnorm(2, sqrt(0.1)), unit = rnorm(sector, 1), contract = NULL, year = rlnorm(unit, 1)) wijkt <- runif(50, 2, 10) wijkt <- runif(300, rep(0.5 * wijkt, each = 6), rep(1.5 * wijkt, each = 6)) pf <- rcomphierarc(nodes, model.freq = mf, model.sev = ms, weights = wijkt) frequency(pf) weights(pf) actuar/demo/credibility.R0000644000176200001440000000612415147745722015124 0ustar liggesusers### actuar: Actuarial Functions and Heavy Tailed Distributions ### ### Demo of the credibility theory facilities provided by actuar ### ### AUTHOR: Vincent Goulet require(actuar) ## The package provides the famous data set of Hachemeister (1975) as ## a matrix of 5 lines (one for each state) and 25 columns (the state ## number, 12 periods of ratios, 12 periods of corresponding weights). data(hachemeister) hachemeister ## Fitting of a Buhlmann model to the Hachemeister data set using ## function 'cm'. The interface of the function is similar to 'lm'. fit <- cm(~state, hachemeister, ratios = ratio.1:ratio.12) fit # print method summary(fit) # more information fit$means # (weighted) averages fit$weights # total weights fit$unbiased # unbiased variance estimators predict(fit) # credibility premiums ## Fitting of a Buhlmann-Straub model require weights. Here, iterative ## estimators of the variance components are used. fit <- cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = weight.1:weight.12, method = "iterative") summary(fit) predict(fit) ## Simulation of a three level hierarchical portfolio. 
nodes <- list(sector = 2, unit = c(3, 4), contract = c(10, 5, 8, 5, 7, 11, 4), year = 6) mf <- expression(sector = rexp(2), unit = rgamma(sector, 0.1), contract = rgamma(unit, 1), year = rpois(weights * contract)) ms <- expression(sector = rnorm(2, sqrt(0.1)), unit = rnorm(sector, 1), contract = NULL, year = rlnorm(unit, 1)) wijkt <- runif(50, 2, 10) wijkt <- runif(300, rep(0.5 * wijkt, each = 6), rep(1.5 * wijkt, each = 6)) pf <- simul(nodes, model.freq = mf, model.sev = ms, weights = wijkt) ## Fitting of a hierarchical model to the portfolio simulated above. DB <- cbind(weights(pf, prefix = "weight."), aggregate(pf, classif = FALSE) / weights(pf, classif = FALSE)) fit <- cm(~sector + sector:unit + sector:unit:contract, data = DB, ratios = year.1:year.6, weights = weight.year.1:weight.year.6) fit predict(fit) # credibility premiums predict(fit, levels = "unit") # unit credibility premiums only summary(fit) # portfolio summary summary(fit, levels = "unit") # unit portfolio summary only ## Fitting of Hachemeister regression model with intercept at time origin. fit <- cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = weight.1:weight.12, regformula = ~time, regdata = data.frame(time = 1:12)) summary(fit, newdata = data.frame(time = 13)) # 'newdata' is the future value of regressor predict(fit, newdata = data.frame(time = 13)) ## Position the intercept at the barycenter of time. 
fit <- cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = weight.1:weight.12, regformula = ~time, regdata = data.frame(time = 1:12), adj.intercept = TRUE) summary(fit, newdata = data.frame(time = 13)) actuar/vignettes/0000755000176200001440000000000015151412457013546 5ustar liggesusersactuar/vignettes/credibility.Rnw0000644000176200001440000006416115147745722016562 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Credibility theory} %\VignettePackage{actuar} %\SweaveUTF8 \title{Credibility theory features of \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Xavier Milhaud \\ Université Claude Bernard Lyon 1 \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} <>= library(actuar) options(width = 57, digits = 4, deparse.cutoff = 30L) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} Credibility models are actuarial tools to distribute premiums fairly among a heterogeneous group of policyholders (henceforth called \emph{entities}). More generally, they can be seen as prediction methods applicable in any setting where repeated measures are made for subjects with different risk levels. The credibility theory features of \pkg{actuar} consist of matrix \code{hachemeister} containing the famous data set of \cite{Hachemeister_75} and function \code{cm} to fit hierarchical (including Bühlmann, Bühlmann-Straub), regression and linear Bayes credibility models. Furthermore, function \code{rcomphierarc} can simulate portfolios of data satisfying the assumptions of the aforementioned credibility models; see the \code{"simulation"} vignette for details. \section{Hachemeister data set} \label{sec:hachemeister} The data set of \cite{Hachemeister_75} consists of private passenger bodily injury insurance average claim amounts, and the corresponding number of claims, for five U.S.\ states over 12 quarters between July 1970 and June 1973. 
The data set is included in the package in the form of a matrix with 5 rows and 25 columns. The first column contains a state index, columns 2--13 contain the claim averages and columns 14--25 contain the claim numbers: <>= data(hachemeister) hachemeister @ \section{Hierarchical credibility model} \label{sec:hierarchical} The linear model fitting function of R is \code{lm}. Since credibility models are very close in many respects to linear models, and since the credibility model fitting function of \pkg{actuar} borrows much of its interface from \code{lm}, we named the credibility function \code{cm}. Function \code{cm} acts as a unified interface for all credibility models supported by the package. Currently, these are: the unidimensional models of \cite{Buhlmann_69} and \cite{BS_70}; the hierarchical model of \cite{Jewell_75} (of which the first two are special cases); the regression model of \cite{Hachemeister_75}, optionally with the intercept at the barycenter of time \citep[Section~8.4]{Buhlmann_Gisler}; linear Bayes models. The modular design of \code{cm} makes it easy to add new models if desired. This section concentrates on usage of \code{cm} for hierarchical models. There are some variations in the formulas of the hierarchical model in the literature. We compute the credibility premiums as given in \cite{BJ_87} or \cite{Buhlmann_Gisler}, supporting three types of estimators of the between variance structure parameters: the unbiased estimators of \cite{Buhlmann_Gisler} (the default), the slightly different version of \cite{Ohlsson} and the iterative pseudo-estimators as found in \cite{LivreVert} or \cite{Goulet_JAP}. Consider an insurance portfolio where \emph{entities} are classified into \emph{cohorts}. In our terminology, this is a two-level hierarchical classification structure. 
The observations are claim amounts $S_{ijt}$, where index $i = 1, \dots, I$ identifies the cohort, index $j = 1, \dots, J_i$ identifies the entity within the cohort and index $t = 1, \dots, n_{ij}$ identifies the period (usually a year). To each data point corresponds a weight --- or volume --- $w_{ijt}$. Then, the best linear prediction for the next period outcome of a entity based on ratios $X_{ijt} = S_{ijt}/w_{ijt}$ is \begin{equation} \label{eq:hierarchical:premiums} \begin{split} \hat{\pi}_{ij} &= z_{ij} X_{ijw} + (1 - z_{ij}) \hat{\pi}_i \\ \hat{\pi}_i &= z_i X_{izw} + (1 - z_i) m, \end{split} \end{equation} with the credibility factors \begin{align*} z_{ij} &= \frac{w_{ij\pt}}{w_{ij\pt} + s^2/a}, & w_{ij\pt} &= \sum_{t = 1}^{n_{ij}} w_{ijt} \\ z_{i} &= \frac{z_{i\pt}}{z_{i\pt} + a/b}, & z_{i\pt} &= \sum_{j = 1}^{J_i} z_{ij} \end{align*} and the weighted averages \begin{align*} X_{ijw} &= \sum_{t = 1}^{n_{ij}} \frac{w_{ijt}}{w_{ij\pt}}\, X_{ijt} \\ X_{izw} &= \sum_{j = 1}^{J_i} \frac{z_{ij}}{z_{i\pt}}\, X_{ijw}. \end{align*} The estimator of $s^2$ is \begin{equation} \label{eq:s2} \hat{s}^2 = \frac{1}{\sum_{i = 1}^I \sum_{j = 1}^{J_i} (n_{ij} - 1)} \sum_{i = 1}^I \sum_{j = 1}^{J_i} \sum_{t = 1}^{n_{ij}} w_{ijt} (X_{ijt} - X_{ijw})^2. \end{equation} The three types of estimators for the variance components $a$ and $b$ are the following. First, let \begin{align*} A_i &= \sum_{j = 1}^{J_i} w_{ij\pt} (X_{ijw} - X_{iww})^2 - (J_i - 1) s^2 & c_i &= w_{i\pt\pt} - \sum_{j = 1}^{J_i} \frac{w_{ij\pt}^2}{w_{i\pt\pt}} \\ B &= \sum_{i = 1}^I z_{i\pt} (X_{izw} - \bar{X}_{zzw})^2 - (I - 1) a & d &= z_{\pt\pt} - \sum_{i = 1}^I \frac{z_{i\pt}^2}{z_{\pt\pt}}, \end{align*} with \begin{equation} \label{eq:Xbzzw} \bar{X}_{zzw} = \sum_{i = 1}^I \frac{z_{i\pt}}{z_{\pt\pt}}\, X_{izw}. \end{equation} (Hence, $\E{A_i} = c_i a$ and $\E{B} = d b$.) 
Then, the Bühlmann--Gisler estimators are \begin{align} \label{eq:ac-BG} \hat{a} &= \frac{1}{I} \sum_{i = 1}^I \max \left( \frac{A_i}{c_i}, 0 \right) \\ \label{eq:bc-BG} \hat{b} &= \max \left( \frac{B}{d}, 0 \right), \end{align} the Ohlsson estimators are \begin{align} \label{eq:ac-Ohl} \hat{a}^\prime &= \frac{\sum_{i = 1}^I A_i}{\sum_{i = 1}^I c_i} \\ \label{eq:bc-Ohl} \hat{b}^\prime &= \frac{B}{d} \end{align} and the iterative (pseudo-)estimators are \begin{align} \label{eq:at} \tilde{a} &= \frac{1}{\sum_{i = 1}^I (J_i - 1)} \sum_{i = 1}^I \sum_{j = 1}^{J_i} z_{ij} (X_{ijw} - X_{izw})^2 \\ \label{eq:bt} \tilde{b} &= \frac{1}{I - 1} \sum_{i = 1}^I z_i (X_{izw} - X_{zzw})^2, \end{align} where \begin{equation} \label{eq:Xzzw} X_{zzw} = \sum_{i = 1}^I \frac{z_i}{z_\pt}\, X_{izw}. \end{equation} Note the difference between the two weighted averages \eqref{eq:Xbzzw} and \eqref{eq:Xzzw}. See \cite{cm} for further discussion on this topic. Finally, the estimator of the collective mean $m$ is $\hat{m} = X_{zzw}$. The credibility modeling function \code{cm} assumes that data is available in the format most practical applications would use, namely a rectangular array (matrix or data frame) with entity observations in the rows and with one or more classification index columns (numeric or character). One will recognize the output format of \code{rcomphierarc} and its summary methods. Then, function \code{cm} works much the same as \code{lm}. It takes in argument: a formula of the form \code{\~{} terms} describing the hierarchical interactions in a data set; the data set containing the variables referenced in the formula; the names of the columns where the ratios and the weights are to be found in the data set. The latter should contain at least two nodes in each level and more than one period of experience for at least one entity. Missing values are represented by \code{NA}s. There can be entities with no experience (complete lines of \code{NA}s). 
In order to give an easily reproducible example, we group states 1 and 3 of the Hachemeister data set into one cohort and states 2, 4 and 5 into another. This shows that data does not have to be sorted by level. The fitted model below uses the iterative estimators of the variance components. <>= X <- cbind(cohort = c(1, 2, 1, 2, 2), hachemeister) fit <- cm(~cohort + cohort:state, data = X, ratios = ratio.1:ratio.12, weights = weight.1:weight.12, method = "iterative") fit @ The function returns a fitted model object of class \code{"cm"} containing the estimators of the structure parameters. To compute the credibility premiums, one calls a method of \code{predict} for this class. <>= predict(fit) @ One can also obtain a nicely formatted view of the most important results with a call to \code{summary}. <>= summary(fit) @ The methods of \code{predict} and \code{summary} can both report for a subset of the levels by means of an argument \code{levels}. <>= summary(fit, levels = "cohort") predict(fit, levels = "cohort") @ \section{Bühlmann and Bühlmann--Straub models} \label{sec:buhlmann} As mentioned above, the Bühlmann and Bühlmann--Straub models are simply one-level hierarchical models. In this case, the Bühlmann--Gisler and Ohlsson estimators of the between variance parameters are both identical to the usual \cite{BS_70} estimator \begin{equation} \label{eq:a-hat} \hat{a} = \frac{w_{\pt\pt}}{w_{\pt\pt}^2 - \sum_{i=1}^I w_{i\pt}^2} \left( \sum_{i=1}^I w_{i\pt} (X_{iw} - X_{ww})^2 - (I - 1) \hat{s}^2 \right), \end{equation} and the iterative estimator \begin{equation} \label{eq:a-tilde} \tilde{a} = \frac{1}{I - 1} \sum_{i = 1}^I z_i (X_{iw} - X_{zw})^2 \end{equation} is better known as the Bichsel--Straub estimator. To fit the Bühlmann model using \code{cm}, one simply does not specify any weights. 
<>= cm(~state, hachemeister, ratios = ratio.1:ratio.12) @ When weights are specified together with a one-level model, \code{cm} automatically fits the Bühlmann--Straub model to the data. In the example below, we use the Bichsel--Straub estimator for the between variance. <>= cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = weight.1:weight.12) @ \section{Regression model of Hachemeister} \label{sec:regression} The credibility regression model of \cite{Hachemeister_75} is a generalization of the Bühlmann--Straub model. If data shows a systematic trend, the latter model will typically under- or over-estimate the true premium of an entity. The idea of \citeauthor{Hachemeister_75} was to fit to the data a regression model where the parameters are a credibility weighted average of an entity's regression parameters and the group's parameters. In order to use \code{cm} to fit a credibility regression model to a data set, one simply has to supply as additional arguments \code{regformula} and \code{regdata}. The first one is a formula of the form \code{\~{} terms} describing the regression model, and the second is a data frame of regressors. That is, arguments \code{regformula} and \code{regdata} are in every respect equivalent to arguments \code{formula} and \code{data} of \code{lm}, with the minor difference that \code{regformula} does not need to have a left hand side (and is ignored if present). Below, we fit the model \begin{displaymath} X_{it} = \beta_0 + \beta_1 t + \varepsilon_t, \quad t = 1, \dots, 12 \end{displaymath} to the original data set of \cite{Hachemeister_75}. <>= fit <- cm(~state, hachemeister, regformula = ~ time, regdata = data.frame(time = 1:12), ratios = ratio.1:ratio.12, weights = weight.1:weight.12) fit @ To compute the credibility premiums, one has to provide the ``future'' values of the regressors as in \code{predict.lm}. 
<>= predict(fit, newdata = data.frame(time = 13)) @ It is well known that the basic regression model has a major drawback: there is no guarantee that the credibility regression line will lie between the collective and individual ones. This may lead to grossly inadequate premiums, as Figure~\ref{fig:state4} shows. \begin{figure}[t] \centering <>= plot(NA, xlim = c(1, 13), ylim = c(1000, 2000), xlab = "", ylab = "") x <- cbind(1, 1:12) lines(1:12, x %*% fit$means$portfolio, col = "blue", lwd = 2) lines(1:12, x %*% fit$means$state[, 4], col = "red", lwd = 2, lty = 2) lines(1:12, x %*% coefficients(fit$adj.models[[4]]), col = "darkgreen", lwd = 2, lty = 3) points(13, predict(fit, newdata = data.frame(time = 13))[4], pch = 8, col = "darkgreen") legend("bottomright", legend = c("collective", "individual", "credibility"), col = c("blue", "red", "darkgreen"), lty = 1:3) @ \caption{Collective, individual and credibility regression lines for State 4 of the Hachemeister data set. The point indicates the credibility premium.} \label{fig:state4} \end{figure} The solution proposed by \cite{Buhlmann:regression:1997} is simply to position the intercept not at time origin, but instead at the barycenter of time \citep[see also][Section~8.4]{Buhlmann_Gisler}. In mathematical terms, this essentially amounts to using an orthogonal design matrix. By setting the argument \code{adj.intercept} to \code{TRUE} in the call, \code{cm} will automatically fit the credibility regression model with the intercept at the barycenter of time. The resulting regression coefficients have little meaning, but the predictions are sensible. <>= fit2 <- cm(~state, hachemeister, regformula = ~ time, regdata = data.frame(time = 1:12), adj.intercept = TRUE, ratios = ratio.1:ratio.12, weights = weight.1:weight.12) summary(fit2, newdata = data.frame(time = 13)) @ % Figure~\ref{fig:state4:2} shows the beneficient effect of the intercept adjustment on the premium of State~4. 
\begin{figure}[t] \centering <>= plot(NA, xlim = c(1, 13), ylim = c(1000, 2000), xlab = "", ylab = "") x <- cbind(1, 1:12) R <- fit2$transition lines(1:12, x %*% solve(R, fit2$means$portfolio), col = "blue", lwd = 2) lines(1:12, x %*% solve(R, fit2$means$state[, 4]), col = "red", lwd = 2, lty = 2) lines(1:12, x %*% solve(R, coefficients(fit2$adj.models[[4]])), col = "darkgreen", lwd = 2, lty = 3) points(13, predict(fit2, newdata = data.frame(time = 13))[4], pch = 8, col = "darkgreen") legend("bottomright", legend = c("collective", "individual", "credibility"), col = c("blue", "red", "darkgreen"), lty = 1:3) @ \caption{Collective, individual and credibility regression lines for State 4 of the Hachemeister data set when the intercept is positioned at the barycenter of time. The point indicates the credibility premium.} \label{fig:state4:2} \end{figure} \section{Linear Bayes model} \label{sec:bayes} In the pure bayesian approach to the ratemaking problem, we assume that the observations $X_t$, $t = 1, \dots, n$, of an entity depend on its risk level $\theta$, and that this risk level is a realization of an unobservable random variable $\Theta$. The best (in the mean square sense) approximation to the unknown risk premium $\mu(\theta) = \E{X_t|\Theta = \theta}$ based on observations $X_1, \dots, X_n$ is the Bayesian premium \begin{equation*} B_{n + 1} = \E{\mu(\Theta)|X_1, \dots, X_n}. \end{equation*} It is then well known \citep{Buhlmann_Gisler,LossModels4e} that for some combinaisons of distributions, the Bayesian premium is linear and can written as a credibility premium \begin{equation*} B_{n + 1} = z \bar{X} + (1 - z) m, \end{equation*} where $m = \E{\mu(\Theta)}$ and $z = n/(n + K)$ for some constant $K$. 
The combinations of distributions yielding a linear Bayes premium involve members of the univariate exponential family for the distribution of $X|\Theta = \theta$ and their natural conjugate for the distribution of $\Theta$: \begin{itemize} \item $X|\Theta = \theta \sim \text{Poisson}(\theta)$, $\Theta \sim \text{Gamma}(\alpha, \lambda)$; \item $X|\Theta = \theta \sim \text{Exponential}(\theta)$, $\Theta \sim \text{Gamma}(\alpha, \lambda)$; \item $X|\Theta = \theta \sim \text{Normal}(\theta, \sigma^2_2)$, $\Theta \sim \text{Normal}(\mu, \sigma^2_1)$; \item $X|\Theta = \theta \sim \text{Bernoulli}(\theta)$, $\Theta \sim \text{Beta}(a, b)$; \item $X|\Theta = \theta \sim \text{Geometric}(\theta)$, $\Theta \sim \text{Beta}(a, b)$; \end{itemize} and the convolutions \begin{itemize} \item $X|\Theta = \theta \sim \text{Gamma}(\tau, \theta)$, $\Theta \sim \text{Gamma}(\alpha, \lambda)$; \item $X|\Theta = \theta \sim \text{Binomial}(\nu, \theta)$, $\Theta \sim \text{Beta}(a, b)$; \item $X|\Theta = \theta \sim \text{Negative Binomial}(r, \theta)$ and $\Theta \sim \text{Beta}(a, b)$. \end{itemize} \autoref{sec:formulas} provides the complete formulas for the above combinations of distributions. In addition, \citet[section~2.6]{Buhlmann_Gisler} show that if $X|\Theta = \theta \sim \text{Single Parameter Pareto}(\theta, x_0)$ and $\Theta \sim \text{Gamma}(\alpha, \lambda)$, then the Bayesian estimator of parameter $\theta$ --- not of the risk premium! --- is \begin{equation*} \hat{\Theta} = \eta \hat{\theta}^{\text{MLE}} + (1 - \eta) \frac{\alpha}{\lambda}, \end{equation*} where \begin{equation*} \hat{\theta}^{\text{MLE}} = \frac{n}{\sum_{i = 1}^n \ln (X_i/x_0)} \end{equation*} is the maximum likelihood estimator of $\theta$ and \begin{equation*} \eta = \frac{\sum_{i = 1}^n \ln (X_i/x_0)}{% \lambda + \sum_{i = 1}^n \ln (X_i/x_0)} \end{equation*} is a weight not restricted to $(0, 1)$. 
(See the \code{"distributions"} package vignette for details on the Single Parameter Pareto distribution.) When argument \code{formula} is \code{"bayes"}, function \code{cm} computes pure Bayesian premiums --- or estimator in the Pareto/Gamma case --- for the combinations of distributions above. We identify which by means of argument \code{likelihood} that must be one of % \code{"poisson"}, % \code{"exponential"}, % \code{"gamma"}, % \code{"normal"}, % \code{"bernoulli"}, % \code{"binomial"}, % \code{"geometric"}, % \code{"negative binomial"} or % \code{"pareto"}. % The parameters of the distribution of $X|\Theta = \theta$, if any, and those of the distribution of $\Theta$ are specified using the argument names (and default values) of \code{dgamma}, \code{dnorm}, \code{dbeta}, \code{dbinom}, \code{dnbinom} or \code{dpareto1}, as appropriate. Consider the case where \begin{align*} X|\Theta = \theta &\sim \text{Poisson}(\theta) \\ \Theta &\sim \text{Gamma}(\alpha, \lambda). \end{align*} The posterior distribution of $\Theta$ is \begin{equation*} \Theta|X_1, \dots, X_n \sim \text{Gamma} \left( \alpha + \sum_{t = 1}^n X_t, \lambda + n \right). \end{equation*} Therefore, the Bayesian premium is \begin{align*} B_{n + 1} &= \E{\mu(\Theta)|X_1, \dots, X_n} \\ &= \E{\Theta|X_1, \dots, X_n} \\ &= \frac{\alpha + \sum_{t = 1}^n X_t}{\lambda + n} \\ &= \frac{n}{n + \lambda}\, \bar{X} + \frac{\lambda}{n + \lambda} \frac{\alpha}{\lambda} \\ &= z \bar{X} + (1 - z) m, \end{align*} with $m = \E{\mu(\Theta)} = \E{\Theta} = \alpha/\lambda$ and \begin{equation*} z = \frac{n}{n + K}, \quad K = \lambda. \end{equation*} One may easily check that if $\alpha = \lambda = 3$ and $X_1 = 5, X_2 = 3, X_3 = 0, X_4 = 1, X_5 = 1$, then $B_6 = 1.625$. We obtain the same result using \code{cm}. 
<>= x <- c(5, 3, 0, 1, 1) fit <- cm("bayes", x, likelihood = "poisson", shape = 3, rate = 3) fit predict(fit) summary(fit) @ \appendix \section{Linear Bayes formulas} \label{sec:formulas} This appendix provides the main linear Bayes credibility results for combinations of a likelihood function member of the univariate exponential family with its natural conjugate. For each combination, we provide, other than the names of the distributions of $X|\Theta = \theta$ and $\Theta$: \begin{itemize} \item the posterior distribution $\Theta|X_1 = x_1, \dots, X_n = x_n$, always of the same type as the prior, only with updated parameters; \item the risk premium $\mu(\theta) = \E{X|\Theta = \theta}$; \item the collective premium $m = \E{\mu(\Theta)}$; \item the Bayesian premium $B_{n+1} = \E{\mu(\Theta)|X_1, \dots, X_n}$, always equal to the collective premium evaluated at the parameters of the posterior distribution; \item the credibility factor when the Bayesian premium is expressed as a credibility premium. \end{itemize} %% Compact Listes à puce compactes et sans puce, justement. 
\begingroup \setlist[itemize]{label={},leftmargin=0pt,align=left,nosep} \subsection{Bernoulli/beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Bernoulli}(\theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + \sum_{t = 1}^n x_t \\ \tilde{b} &= b + n - \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \theta \end{equation*} \item Collective premium \begin{equation*} m = \frac{a}{a + b} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{a + \sum_{t = 1}^n X_t}{a + b + n} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + a + b} \end{equation*} \end{itemize} \subsection{Binomial/beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Binomial}(\nu, \theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + \sum_{t = 1}^n x_t \\ \tilde{b} &= b + n \nu - \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \nu \theta \end{equation*} \item Collective premium \begin{equation*} m = \frac{\nu a}{a + b} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\nu (a + \sum_{t = 1}^n X_t)}{a + b + n \nu} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + (a + b)/\nu} \end{equation*} \end{itemize} \subsection{Geometric/Beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Geometric}(\theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + n \\ \tilde{b} &= b + \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{1 - \theta}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{b}{a - 1} \end{equation*} \item Bayesian premium 
\begin{equation*} B_{n + 1} = \frac{b + \sum_{t = 1}^n X_t}{a + n - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + a - 1} \end{equation*} \end{itemize} \subsection{Negative binomial/Beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Negative binomial}(r, \theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + n r \\ \tilde{b} &= b + \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{r (1 - \theta)}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{r b}{a - 1} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{r (b + \sum_{t = 1}^n X_t)}{a + n r - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + (a - 1)/r} \end{equation*} \end{itemize} \subsection{Poisson/Gamma case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Poisson}(\theta)$ \item $\Theta \sim \text{Gamma}(\alpha, \lambda)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Gamma}(\tilde{\alpha}, \tilde{\lambda})$ \begin{align*} \tilde{\alpha} &= \alpha + \sum_{t = 1}^n x_t \\ \tilde{\lambda} &= \lambda + n \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \theta \end{equation*} \item Collective premium \begin{equation*} m = \frac{\alpha}{\lambda} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\alpha + \sum_{t = 1}^n X_t}{\lambda + n} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + \lambda} \end{equation*} \end{itemize} \subsection{Exponential/Gamma case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Exponential}(\theta)$ \item $\Theta \sim \text{Gamma}(\alpha, \lambda)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Gamma}(\tilde{\alpha}, \tilde{\lambda})$ \begin{align*} \tilde{\alpha} &= \alpha + n \\ \tilde{\lambda} &= \lambda + \sum_{t = 1}^n x_t 
\end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{1}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{\lambda}{\alpha - 1} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\lambda + \sum_{t = 1}^n X_t}{\alpha + n - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + \alpha - 1} \end{equation*} \end{itemize} \subsection{Gamma/Gamma case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Gamma}(\tau, \theta)$ \item $\Theta \sim \text{Gamma}(\alpha, \lambda)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Gamma}(\tilde{\alpha}, \tilde{\lambda})$ \begin{align*} \tilde{\alpha} &= \alpha + n \tau \\ \tilde{\lambda} &= \lambda + \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{\tau}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{\tau \lambda}{\alpha - 1} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\tau (\lambda + \sum_{t = 1}^n X_t)}{\alpha + n \tau - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + (\alpha - 1)/\tau} \end{equation*} \end{itemize} \subsection{Normal/Normal case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Normal}(\theta, \sigma_2^2)$ \item $\Theta \sim \text{Normal}(\mu, \sigma_1^2)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Normal}(\tilde{\mu}, \tilde{\sigma}_1^2)$ \begin{align*} \tilde{\mu} &= \frac{\sigma_1^2 \sum_{t = 1}^n x_t + \sigma_2^2 \mu}{n \sigma_1^2 + \sigma_2^2} \\ \tilde{\sigma}_1^2 &= \frac{\sigma_1^2 \sigma_2^2}{n \sigma_1^2 + \sigma_2^2} \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \theta \end{equation*} \item Collective premium \begin{equation*} m = \mu \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\sigma_1^2 \sum_{t = 1}^n X_t + \sigma_2^2 \mu}{n \sigma_1^2 + \sigma_2^2} \end{equation*} \item Credibility factor \begin{equation*} z = 
\frac{n}{n + \sigma_2^2/\sigma_1^2} \end{equation*} \end{itemize} \endgroup \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/vignettes/framed.sty0000644000176200001440000005366115147745722015571 0ustar liggesusers% framed.sty v 0.96 2011/10/22 % Copyright (C) 1992-2011 by Donald Arseneau (asnd@triumf.ca) % These macros may be freely transmitted, reproduced, or modified % for any purpose provided that this notice is left intact. % %====================== Begin Instructions ======================= % % framed.sty % ~~~~~~~~~~ % Create framed, shaded, or differently highlighted regions that can % break across pages. The environments defined are % framed - ordinary frame box (\fbox) with edge at margin % oframed - framed with open top/bottom at page breaks % shaded - shaded background (\colorbox) bleeding into margin % shaded* - shaded background (\colorbox) with edge at margin % snugshade - shaded with tight fit around text (esp. in lists) % snugshade* - like snugshade with shading edge at margin % leftbar - thick vertical line in left margin % % to be used like % \begin{framed} % copious text % \end{framed} % % But the more general purpose of this package is to facilitate the % definition of new environments that take multi-line material, % wrap it with some non-breakable formatting (some kind of box or % decoration) and allow page breaks in the material. Such environments % are defined to declare (or use) \FrameCommand for applying the boxy % decoration, and \MakeFramed{settings} ... \endMakeFramed wrapped % around the main text argument (environment body). % % The "framed" environment uses "\fbox", by default, as its "\FrameCommand" % with the additional settings "\fboxrule=\FrameRule" and "\fboxsep=\FrameSep". % You can change these lengths (using "\setlength") and you can change % the definition of "\FrameCommand" to use much fancier boxes. 
% % In fact, the "shaded" environment just redefines \FrameCommand to be % "\colorbox{shadecolor}" (and you have to define the color `"shadecolor"': % "\definecolor{shadecolor}..."). % % Although the intention is for other packages to define the varieties % of decoration, a command "\OpenFbox" is defined for frames with open % tops or bottoms, and used for the "oframed" environment. This facility % is based on a more complex and capable command "\CustomFBox" which can % be used for a wider range of frame styles. One such style of a title-bar % frame with continuation marks is provided as an example. It is used by % the "titled-frame" environment. To make use of "titled-frame" in your % document, or the "\TitleBarFrame" command in your own environment % definitions, you must define the colors TFFrameColor (for the frame) % and a contrasting TFTitleColor (for the title text). % % A page break is allowed, and even encouraged, before the framed % environment. If you want to attach some text (a box title) to the % frame, then the text should be inserted by \FrameCommand so it cannot % be separated from the body. % % The contents of the framed regions are restricted: % Floats, footnotes, marginpars and head-line entries will be lost. % (Some of these may be handled in a later version.) % This package will not work with the page breaking of multicol.sty, % or other systems that perform column-balancing. % % The MakeFramed environment does the work. Its `settings' argument % should contain any adjustments to the text width (via a setting of % "\hsize"). Here, the parameter "\width" gives the measured extra width % added by the frame, so a common setting is "\advance\hsize-\width" % which reduces the width of the text just enough that the outer edge % of the frame aligns with the margins. 
The `settings' should also % include a `restore' command -- "\@parboxrestore" or "\FrameRestore" % or something similar; for instance, the snugshade environment uses % settings to eliminate list indents and vertical space, but uses % "\hspace" in "\FrameCommand" to reproduce the list margin ouside the % shading. % % There are actually four variants of "\FrameCommand" to allow different % formatting for each part of an environment broken over pages. Unbroken % text is adorned by "\FrameCommand", whereas split text first uses % "\FirstFrameCommand", possibly followed by "\MidFrameCommand", and % finishing with "\LastFrameCommand". The default definitions for % these three just invokes "\FrameCommand", so that all portions are % framed the same way. See the oframe environment for use of distinct % First/Mid/Last frames. % % Expert commands: % \MakeFramed, \endMakeFramed: the "MakeFramed" environment % \FrameCommand: command to draw the frame around its argument % \FirstFrameCommand: the frame for the first part of a split environment % \LastFrameCommand: for the last portion % \MidFrameCommand: for any intermediate segments % \FrameRestore: restore some text settings, but fewer than \@parboxrestore % \FrameRule: length register; \fboxrule for default "framed". % \FrameSep: length register; \fboxsep for default "framed". % \FrameHeightAdjust: macro; height of frame above baseline at top of page % \OuterFrameSep: vertical space before and after the framed env. Defaults to "\topsep" % % This is still a `pre-production' version because I can think of many % features/improvements that should be made. Also, a detailed manual needs % to be written. Nevertheless, starting with version 0.5 it should be bug-free. % % ToDo: % Test more varieties of list % Improve and correct documentation % Propagation of \marks % Handle footnotes (how??) floats (?) and marginpars. % Stretchability modification. % Make inner contents height/depth influence placement. 
%======================== End Instructions ======================== \ProvidesPackage{framed}[2011/10/22 v 0.96: framed or shaded text with page breaks] \newenvironment{framed}% using default \FrameCommand {\MakeFramed {\advance\hsize-\width \FrameRestore}}% {\endMakeFramed} \newenvironment{shaded}{% \def\FrameCommand{\fboxsep=\FrameSep \colorbox{shadecolor}}% \MakeFramed {\FrameRestore}}% {\endMakeFramed} \newenvironment{shaded*}{% \def\FrameCommand{\fboxsep=\FrameSep \colorbox{shadecolor}}% \MakeFramed {\advance\hsize-\width \FrameRestore}}% {\endMakeFramed} \newenvironment{leftbar}{% \def\FrameCommand{\vrule width 3pt \hspace{10pt}}% \MakeFramed {\advance\hsize-\width \FrameRestore}}% {\endMakeFramed} % snugshde: Shaded environment that % -- uses the default \fboxsep instead of \FrameSep % -- leaves the text indent unchanged (shading bleeds out) % -- eliminates possible internal \topsep glue (\@setminipage) % -- shrinks inside the margins for lists % An \item label will tend to hang outside the shading, thanks to % the small \fboxsep. 
\newenvironment{snugshade}{% \def\FrameCommand##1{\hskip\@totalleftmargin \hskip-\fboxsep \colorbox{shadecolor}{##1}\hskip-\fboxsep % There is no \@totalrightmargin, so: \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% \MakeFramed {\advance\hsize-\width \@totalleftmargin\z@ \linewidth\hsize \@setminipage}% }{\par\unskip\@minipagefalse\endMakeFramed} \newenvironment{snugshade*}{% \def\FrameCommand##1{\hskip\@totalleftmargin \colorbox{shadecolor}{##1}% % There is no \@totalrightmargin, so: \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% \MakeFramed {\advance\hsize-\width \@totalleftmargin\z@ \linewidth\hsize \advance\labelsep\fboxsep \@setminipage}% }{\par\unskip\@minipagefalse\endMakeFramed} \newenvironment{oframed}{% open (top or bottom) framed \def\FrameCommand{\OpenFBox\FrameRule\FrameRule}% \def\FirstFrameCommand{\OpenFBox\FrameRule\z@}% \def\MidFrameCommand{\OpenFBox\z@\z@}% \def\LastFrameCommand{\OpenFBox\z@\FrameRule}% \MakeFramed {\advance\hsize-\width \FrameRestore}% }{\endMakeFramed} % A simplified entry to \CustomFBox with two customized parameters: % the thicknesses of the top and bottom rules. Perhaps we want to % use less \fboxsep on the open edges? \def\OpenFBox#1#2{\fboxsep\FrameSep \CustomFBox{}{}{#1}{#2}\FrameRule\FrameRule} % \CustomFBox is like an amalgamation of \fbox and \@frameb@x, % so it can be used by an alternate to \fbox or \fcolorbox, but % it has more parameters for various customizations. % Parameter #1 is inserted (in vmode) right after the top rule % (useful for a title or assignments), and #2 is similar, but % inserted right above the bottom rule. % The thicknesses of the top, bottom, left, and right rules are % given as parameters #3,#4,#5,#6 respectively. They should be % \fboxrule or \z@ (or some other thickness). % The text argument is #7. 
% An instance of this can be used for the frame of \fcolorbox by % locally defining \fbox before \fcolorbox; e.g., % \def\fbox{\CustomFBox{}{}\z@\z@\fboxrule\fboxrule}\fcolorbox % % Do we need to use different \fboxsep on different sides too? % \long\def\CustomFBox#1#2#3#4#5#6#7{% \leavevmode\begingroup \setbox\@tempboxa\hbox{% \color@begingroup \kern\fboxsep{#7}\kern\fboxsep \color@endgroup}% \hbox{% % Here we calculate and shift for the depth. Done in % a group because one of the arguments might be \@tempdima % (we could use \dimexpr instead without grouping). \begingroup \@tempdima#4\relax \advance\@tempdima\fboxsep \advance\@tempdima\dp\@tempboxa \expandafter\endgroup\expandafter \lower\the\@tempdima\hbox{% \vbox{% \hrule\@height#3\relax #1% \hbox{% \vrule\@width#5\relax \vbox{% \vskip\fboxsep % maybe these should be parameters too \copy\@tempboxa \vskip\fboxsep}% \vrule\@width#6\relax}% #2% \hrule\@height#4\relax}% }% }% \endgroup } % A particular type of titled frame with continuation marks. % Parameter #1 is the title, repeated on each page. \newenvironment{titled-frame}[1]{% \def\FrameCommand{\fboxsep8pt\fboxrule2pt \TitleBarFrame{\textbf{#1}}}% \def\FirstFrameCommand{\fboxsep8pt\fboxrule2pt \TitleBarFrame[$\blacktriangleright$]{\textbf{#1}}}% \def\MidFrameCommand{\fboxsep8pt\fboxrule2pt \TitleBarFrame[$\blacktriangleright$]{\textbf{#1\ (cont)}}}% \def\LastFrameCommand{\fboxsep8pt\fboxrule2pt \TitleBarFrame{\textbf{#1\ (cont)}}}% \MakeFramed{\advance\hsize-20pt \FrameRestore}}% % note: 8 + 2 + 8 + 2 = 20. Don't use \width because the frame title % could interfere with the width measurement. {\endMakeFramed} % \TitleBarFrame[marker]{title}{contents} % Frame with a label at top, optional continuation marker at bottom right. % Frame color is TFFrameColor and title color is a contrasting TFTitleColor; % both need to be defined before use. The frame itself use \fboxrule and % \fboxsep. 
If the title is omitted entirely, the title bar is omitted % (use a blank space to force a blank title bar). % \newcommand\TitleBarFrame[3][]{\begingroup \ifx\delimiter#1\delimiter \let\TF@conlab\@empty \else \def\TF@conlab{% continuation label \nointerlineskip \smash{\rlap{\kern\wd\@tempboxa\kern\fboxrule\kern\fboxsep #1}}}% \fi \let\TF@savecolor\current@color \textcolor{TFFrameColor}{% \CustomFBox {\TF@Title{#2}}{\TF@conlab}% \fboxrule\fboxrule\fboxrule\fboxrule {\let\current@color\TF@savecolor\set@color #3}% }\endgroup } % The title bar for \TitleBarFrame \newcommand\TF@Title[1]{% \ifx\delimiter#1\delimiter\else \kern-0.04pt\relax \begingroup \setbox\@tempboxa\vbox{% \kern0.8ex \hbox{\kern\fboxsep\textcolor{TFTitleColor}{#1}\vphantom{Tj)}}% \kern0.8ex}% \hrule\@height\ht\@tempboxa \kern-\ht\@tempboxa \box\@tempboxa \endgroup \nointerlineskip \kern-0.04pt\relax \fi } \chardef\FrameRestore=\catcode`\| % for debug \catcode`\|=\catcode`\% % (debug: insert space after backslash) \newlength\OuterFrameSep \OuterFrameSep=\maxdimen \relax \def\MakeFramed#1{\par % apply default \OuterFrameSep = \topsep \ifdim\OuterFrameSep=\maxdimen \OuterFrameSep\topsep \fi % measure added width and height; call result \width and \height \fb@sizeofframe\FrameCommand \let\width\fb@frw \let\height\fb@frh % insert pre-penalties and skips \begingroup \skip@\lastskip \if@nobreak\else \penalty9999 % updates \page parameters \ifdim\pagefilstretch=\z@ \ifdim\pagefillstretch=\z@ % not infinitely stretchable, so encourage a page break here \edef\@tempa{\the\skip@}% \ifx\@tempa\zero@glue \penalty-30 \else \vskip-\skip@ \penalty-30 \vskip\skip@ \fi\fi\fi \penalty\z@ % Give a stretchy breakpoint that will always be taken in preference % to the \penalty 9999 used to update page parameters. The cube root % of 10000/100 indicates a multiplier of 0.21545, but the maximum % calculated badness is really 8192, not 10000, so the multiplier % is 0.2301. 
\advance\skip@ \z@ plus-.5\baselineskip \advance\skip@ \z@ plus-.231\height \advance\skip@ \z@ plus-.231\skip@ \advance\skip@ \z@ plus-.231\OuterFrameSep \vskip-\skip@ \penalty 1800 \vskip\skip@ \fi \addvspace{\OuterFrameSep}% \endgroup % clear out pending page break \penalty\@M \vskip 2\baselineskip \vskip\height \penalty9999 \vskip -2\baselineskip \vskip-\height \penalty9999 % updates \pagetotal |\message{After clearout, \pagetotal=\the\pagetotal, \pagegoal=\the\pagegoal. }% \fb@adjheight \setbox\@tempboxa\vbox\bgroup #1% Modifications to \hsize (can use \width and \height) \textwidth\hsize \columnwidth\hsize } \def\endMakeFramed{\par \kern\z@ \hrule\@width\hsize\@height\z@ % possibly bad \penalty-100 % (\hrule moves depth into height) \egroup %%% {\showoutput\showbox\@tempboxa}% \begingroup \fb@put@frame\FrameCommand\FirstFrameCommand \endgroup \@minipagefalse % In case it was set and not cleared } % \fb@put@frame takes the contents of \@tempboxa and puts all, or a piece, % of it on the page with a frame (\FrameCommand, \FirstFrameCommand, % \MidFrameCommand, or \LastFrameCommand). It recurses until all of % \@tempboxa has been used up. (\@tempboxa must have zero depth.) % #1 = attempted framing command, if no split % #2 = framing command if split % First iteration: Try to fit with \FrameCommand. If it does not fit, % split for \FirstFrameCommand. % Later iteration: Try to fit with \LastFrameCommand. If it does not % fit, split for \MidFrameCommand. \def\fb@put@frame#1#2{\relax \ifdim\pagegoal=\maxdimen \pagegoal\vsize \fi | \message{=============== Entering putframe ====================^^J | \pagegoal=\the\pagegoal, \pagetotal=\the\pagetotal. }% \ifinner \fb@putboxa#1% \fb@afterframe \else \dimen@\pagegoal \advance\dimen@-\pagetotal % natural space left on page \ifdim\dimen@<2\baselineskip % Too little room on page | \message{Page has only \the\dimen@\space room left; eject. 
}% \eject \fb@adjheight \fb@put@frame#1#2% \else % there's appreciable room left on the page \fb@sizeofframe#1% | \message{\string\pagetotal=\the\pagetotal, | \string\pagegoal=\the\pagegoal, | \string\pagestretch=\the\pagestretch, | \string\pageshrink=\the\pageshrink, | \string\fb@frh=\the\fb@frh. \space} | \message{^^JBox of size \the\ht\@tempboxa\space}% \begingroup % temporarily set \dimen@ to be... \advance\dimen@.8\pageshrink % maximum space available on page \advance\dimen@-\fb@frh\relax % max space available for frame's contents %%% LOOKS SUBTRACTED AND ADDED, SO DOUBLE ACCOUNTING! \expandafter\endgroup % expand \ifdim, then restore \dimen@ to real room left on page \ifdim\dimen@>\ht\@tempboxa % whole box does fit | \message{fits in \the\dimen@. }% % ToDo: Change this to use vsplit anyway to capture the marks % MERGE THIS WITH THE else CLAUSE!!! \fb@putboxa#1% \fb@afterframe \else % box must be split | \message{must be split to fit in \the\dimen@. }% % update frame measurement to use \FirstFrameCommand or \MidFrameCommand \fb@sizeofframe#2% \setbox\@tempboxa\vbox{% simulate frame and flexiblity of the page: \vskip \fb@frh \@plus\pagestretch \@minus.8\pageshrink \kern137sp\kern-137sp\penalty-30 \unvbox\@tempboxa}% \edef\fb@resto@set{\boxmaxdepth\the\boxmaxdepth \splittopskip\the\splittopskip}% \boxmaxdepth\z@ \splittopskip\z@ | \message{^^JPadded box of size \the\ht\@tempboxa\space split to \the\dimen@}% % Split box here \setbox\tw@\vsplit\@tempboxa to\dimen@ | \toks99\expandafter{\splitfirstmark}% | \toks98\expandafter{\splitbotmark}% | \message{Marks are: \the\toks99, \the\toks98. }% \setbox\tw@\vbox{\unvbox\tw@}% natural-sized | \message{Natural height of split box is \the\ht\tw@, leaving | \the\ht\@tempboxa\space remainder. }% % If the split-to size > (\vsize-\topskip), then set box to full size. \begingroup \advance\dimen@\topskip \expandafter\endgroup \ifdim\dimen@>\pagegoal | \message{Frame is big -- Use up the full column. 
}% \dimen@ii\pagegoal \advance\dimen@ii -\topskip \advance\dimen@ii \FrameHeightAdjust\relax \else % suspect this is implemented incorrectly: % If the split-to size > feasible room_on_page, rebox it smaller. \advance\dimen@.8\pageshrink \ifdim\ht\tw@>\dimen@ | \message{Box too tall; rebox it to \the\dimen@. }% \dimen@ii\dimen@ \else % use natural size \dimen@ii\ht\tw@ \fi \fi % Re-box contents to desired size \dimen@ii \advance\dimen@ii -\fb@frh \setbox\tw@\vbox to\dimen@ii \bgroup % remove simulated frame and page flexibility: \vskip -\fb@frh \@plus-\pagestretch \@minus-.8\pageshrink \unvbox\tw@ \unpenalty\unpenalty \ifdim\lastkern=-137sp % whole box went to next page | \message{box split at beginning! }% % need work here??? \egroup \fb@resto@set \eject % (\vskip for frame size was discarded) \fb@adjheight \fb@put@frame#1#2% INSERTED ??? \else % Got material split off at the head \egroup \fb@resto@set \ifvoid\@tempboxa % it all fit after all | \message{box split at end! }% \setbox\@tempboxa\box\tw@ \fb@putboxa#1% \fb@afterframe \else % it really did split | \message{box split as expected. Its reboxed height is \the\ht\tw@. }% \ifdim\wd\tw@>\z@ \wd\tw@\wd\@tempboxa \centerline{#2{\box\tw@}}% ??? \centerline bad idea \else | \message{Zero width means likely blank. Don't frame it (guess)}% \box\tw@ \fi \hrule \@height\z@ \@width\hsize \eject \fb@adjheight \fb@put@frame\LastFrameCommand\MidFrameCommand \fi\fi\fi\fi\fi } \def\fb@putboxa#1{% \ifvoid\@tempboxa \PackageWarning{framed}{Boxa is void -- discard it. }% \else | \message{Frame and place boxa. 
}% | %{\showoutput\showbox\@tempboxa}% \centerline{#1{\box\@tempboxa}}% \fi } \def\fb@afterframe{% \nointerlineskip \null %{\showoutput \showlists} \penalty-30 \vskip\OuterFrameSep \relax } % measure width and height added by frame (#1 = frame command) % call results \fb@frw and \fb@frh % todo: a mechanism to handle wide frame titles \newdimen\fb@frw \newdimen\fb@frh \def\fb@sizeofframe#1{\begingroup \setbox\z@\vbox{\vskip-5in \hbox{\hskip-5in #1{\hbox{\vrule \@height 4.7in \@depth.3in \@width 5in}}}% \vskip\z@skip}% | \message{Measuring frame addition for \string#1 in \@currenvir\space | gives ht \the\ht\z@\space and wd \the\wd\z@. }% | %{\showoutput\showbox\z@}% \global\fb@frw\wd\z@ \global\fb@frh\ht\z@ \endgroup } \def\fb@adjheight{% \vbox to\FrameHeightAdjust{}% get proper baseline skip from above. \penalty\@M \nointerlineskip \vskip-\FrameHeightAdjust \penalty\@M} % useful for tops of pages \edef\zero@glue{\the\z@skip} \catcode`\|=\FrameRestore % Provide configuration commands: \providecommand\FrameCommand{% \setlength\fboxrule{\FrameRule}\setlength\fboxsep{\FrameSep}% \fbox} \@ifundefined{FrameRule}{\newdimen\FrameRule \FrameRule=\fboxrule}{} \@ifundefined{FrameSep} {\newdimen\FrameSep \FrameSep =3\fboxsep}{} \providecommand\FirstFrameCommand{\FrameCommand} \providecommand\MidFrameCommand{\FrameCommand} \providecommand\LastFrameCommand{\FrameCommand} % Height of frame above first baseline when frame starts a page: \providecommand\FrameHeightAdjust{6pt} % \FrameRestore has parts of \@parboxrestore, performing a similar but % less complete restoration of the default layout. See how it is used in % the "settings" argument of \MakeFrame. Though not a parameter, \hsize % should be set to the desired total line width available inside the % frame before invoking \FrameRestore. 
\def\FrameRestore{% \let\if@nobreak\iffalse \let\if@noskipsec\iffalse \let\-\@dischyph \let\'\@acci\let\`\@accii\let\=\@acciii % \message{FrameRestore: % \@totalleftmargin=\the \@totalleftmargin, % \rightmargin=\the\rightmargin, % \@listdepth=\the\@listdepth. }% % Test if we are in a list (or list-like paragraph) \ifnum \ifdim\@totalleftmargin>\z@ 1\fi \ifdim\rightmargin>\z@ 1\fi \ifnum\@listdepth>\z@ 1\fi 0>\z@ % \message{In a list: \linewidth=\the\linewidth, \@totalleftmargin=\the\@totalleftmargin, % \parshape=\the\parshape, \columnwidth=\the\columnwidth, \hsize=\the\hsize, % \labelwidth=\the\labelwidth. }% \@setminipage % snug fit around the item. I would like this to be non-global. % Now try to propageate changes of width from \hsize to list parameters. % This is deficient, but a more advanced way to indicate modification to text % dimensions is not (yet) provided; in particular, no separate left/right % adjustment. \advance\linewidth-\columnwidth \advance\linewidth\hsize \parshape\@ne \@totalleftmargin \linewidth \else % Not in list \linewidth=\hsize %\message{No list, set \string\linewidth=\the\hsize. 
}% \fi \sloppy } %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% actuar/vignettes/Makefile0000644000176200001440000000113515151412460015200 0ustar liggesusers### -*-Makefile-*- to build actuar vignettes ## ## AUTHOR: Vincent Goulet ## List of vignettes to build VIGNETTES = actuar.pdf coverage.pdf credibility.pdf \ distributions.pdf modeling.pdf risk.pdf \ simulation.pdf ## Toolset SWEAVE = "$(R_HOME)/bin/R" CMD Sweave --encoding="utf-8" TEXI2DVI = LATEX=xelatex texi2dvi -b RM = rm -rf all: pdf %.pdf: %.tex share/preamble.tex actuar.bib ${TEXI2DVI} '$<' .PHONY: pdf pdf: ${VIGNETTES} .PHONY: clean clean: ${RM} *.tex *-[0-9][0-9][0-9].pdf \ *.aux *.bbl *.blg *.log *.out *~ Rplots* \ auto/ share/auto/ actuar/vignettes/share/0000755000176200001440000000000015151412457014650 5ustar liggesusersactuar/vignettes/share/preamble.tex0000644000176200001440000000624415151411046017160 0ustar liggesusers\documentclass[11pt,x11names,english]{article} \usepackage{amsmath,amsthm} \usepackage[round]{natbib} \usepackage{doi} \usepackage{babel} \usepackage[autolanguage,np]{numprint} \usepackage[noae]{Sweave} \usepackage{framed} \usepackage{booktabs} \usepackage[shortlabels]{enumitem} %% Fonts \usepackage[babel=true]{microtype} \usepackage{fontenc} \usepackage{unicode-math} \setmainfont{STIXTwoText} [ Extension = .otf, UprightFont = *-Regular, BoldFont = *-SemiBold, ItalicFont = *-Italic, BoldItalicFont = *-SemiBoldItalic, Scale = 1, Ligatures = TeX ] \setmathfont{STIXTwoMath-Regular} [ Extension = .otf, Scale = 1, bold-style = TeX ] \usepackage[book,medium,proportional,lining,scale=0.92]{FiraSans} \usepackage[medium,lining,nomap,scale=0.90]{FiraMono} %% Colors \usepackage{xcolor} \definecolor{link}{rgb}{0,0.4,0.6} % internal links \definecolor{url}{rgb}{0.6,0,0} % external links \definecolor{citation}{rgb}{0,0.5,0} % citations \definecolor{codebg}{named}{LightYellow1} % R code background %% Hyperlinks \usepackage{hyperref} \hypersetup{% pdfauthor={Vincent Goulet}, 
colorlinks = {true}, linktocpage = {true}, urlcolor = {url}, linkcolor = {link}, citecolor = {citation}, pdfpagemode = {UseOutlines}, pdfstartview = {Fit}, bookmarksopen = {true}, bookmarksnumbered = {true}, bookmarksdepth = {subsubsection}} %% Help for \autoref \addto\extrasenglish{% \def\exampleautorefname{Example}% \def\sectionautorefname{Section}% \def\subsectionautorefname{Section}} %% Sweave Sinput and Soutput environments reinitialized to remove %% default configuration. Space between input and output blocks also %% reduced. \DefineVerbatimEnvironment{Sinput}{Verbatim}{} \DefineVerbatimEnvironment{Soutput}{Verbatim}{} \fvset{listparameters={\setlength{\topsep}{0pt}}} %% Environment Schunk redefined as an hybrid of environments %% snugshade* and leftbar of framed.sty. \makeatletter \renewenvironment{Schunk}{% \setlength{\topsep}{1pt} \def\FrameCommand##1{\hskip\@totalleftmargin \vrule width 2pt\colorbox{codebg}{\hspace{3pt}##1}% % There is no \@totalrightmargin, so: \hskip-\linewidth \hskip-\@totalleftmargin \hskip\columnwidth}% \MakeFramed {\advance\hsize-\width \@totalleftmargin\z@ \linewidth\hsize \advance\labelsep\fboxsep \@setminipage}% }{\par\unskip\@minipagefalse\endMakeFramed} \makeatother %% Flush left enumerate environment. 
\setlist[enumerate]{leftmargin=*,align=left} %% Example environment \theoremstyle{definition} \newtheorem{example}{Example} \theoremstyle{remark} \newtheorem{rem}{Remark} %% New math commands \newcommand{\E}[1]{E[ #1 ]} \newcommand{\VAR}[1]{\mathrm{Var} [ #1 ]} \newcommand{\LAS}{\mathrm{LAS}} \newcommand{\D}{\displaystyle} \newcommand{\pt}{{\scriptscriptstyle \Sigma}} \newcommand{\mat}[1]{\symbf{#1}} %% New styling commands \newcommand{\pkg}[1]{\textbf{#1}} \newcommand{\code}[1]{\texttt{#1}} \newcommand{\file}[1]{{`\normalfont\textsf{#1}'}} \bibliographystyle{plainnat} actuar/vignettes/actuar.bib0000644000176200001440000003113715151411046015501 0ustar liggesusers@string{AB = {ASTIN Bulletin}} @string{IME = {Insurance: Mathematics and Economics}} @string{MVSVM = {Bulletin of the Swiss Association of Actuaries}} @string{NAAJ = {North American Actuarial Journal}} @Book{Abramowitz:1972, author = {Abramowitz, M. and Stegun, I. A.}, title = {Handbook of Mathematical Functions}, publisher = {Dover}, year = 1972, language = {english} } @Book{Arnold:pareto:2ed, author = {Arnold, B. C.}, title = {{P}areto Distributions}, publisher = {{CRC} {P}ress}, year = 2015, edition = 2, isbn = {978-146658485-3}, language = {english} } @Article{AsmussenRolski_91, author = {Asmussen, S. and Rolski, T.}, title = {Computational methods in risk theory: a matrix-algorithmic approach}, journal = IME, year = 1991, volume = 10, pages = {259-274}, language = {english} } @Article{BJ_87, author = {Bühlmann, H. and Jewell, W. S.}, title = {Hierarchical credibility revisited}, year = 1987, journal = MVSVM, volume = 87, pages = {35-54}, language = {english} } @Article{BS_70, author = {Bühlmann, H. 
and Straub, E.}, title = {Glaubgwürdigkeit für {S}chadensätze}, year = 1970, journal = MVSVM, volume = 70, pages = {111-133}, language = {german} } @Book{Bateman:1953:2, author = {Bateman, H.}, title = {Higher transcendental functions}, volume = 2, publisher = {McGraw-Hill}, year = 1953, language = {english} } @InCollection{BeekmanFormula_EAS, author = {Kass, R.}, title = {Beekman's convolution formula}, booktitle = {Encyclopedia of actuarial science}, publisher = {Wiley}, year = 2004, editor = {J. L. Teugels and B. Sundt}, volume = 1, ISBN = {0-4708467-6-3}, language = {english} } @Article{Beekman_68, author = {Beekman, J. A.}, title = {Collective risk results}, journal = {Transactions of the Society of Actuaries}, year = 1968, volume = 20, pages = {182-199}, language = {english} } @Article{Buhlmann:regression:1997, author = {Bühlmann, H. and Gisler, A.}, title = {Credibility in the regression case revisited}, journal = AB, year = 1997, volume = 27, pages = {83-98}, language = {english} } @Article{Buhlmann_69, author = {Bühlmann, H.}, title = {Experience rating and credibility}, year = 1969, journal = AB, volume = 5, pages = {157-165}, language = {english} } @Book{Buhlmann_Gisler, author = {Bühlmann, H. and Gisler, A.}, title = {A course in credibility theory and its applications}, publisher = {Springer}, year = 2005, isbn = {3-5402575-3-5}, language = {english} } @Article{Centeno_02, author = {Centeno, M. {d.} L.}, title = {Measuring the effects of reinsurance by the adjustment coefficient in the Sparre-Anderson model}, journal = IME, year = 2002, volume = 30, pages = {37-49}, language = {english} } @Misc{Dalgaard:r-help:2005, author = {Dalgaard, P.}, title = {simulate zero-truncated {P}oisson distribution}, howpublished = {\texttt{r-help} mailing list}, month = {May 1}, year = 2005, url = {https://stat.ethz.ch/pipermail/r-help/2005-May/070680.html}, language = {english} } @Book{Daykin_et_al, author = {Daykin, C.D. and Pentikäinen, T. 
and Pesonen, M.}, title = {Practical Risk Theory for Actuaries}, publisher = {Chapman \& Hall}, year = 1994, address = {London}, isbn = {0-4124285-0-4}, language = {english} } @Book{DenuitCharpentier1, author = {Denuit, M. and Charpentier, A.}, title = {Mathématiques de l'assurance non-vie}, publisher = {Economica}, year = 2004, volume = {1, Principes fondamentaux de théorie du risque}, address = {Paris}, isbn = {2-7178485-4-1}, language = {francais} } @Manual{GSL, title = {{GNU} Scientific Library Reference Manual}, author = {Galassi, M. and Davies, J. and Theiler, J. and Gough, B. and Jungman, G. and Alken P. and Booth, M. and Rossi, F. and Ulerich, R.}, edition = {Third}, isbn = {0-95461207-8}, url = {https://www.gnu.org/software/gsl/}, language = {english} } @Book{Gerber_MRT, author = {Gerber, H. U.}, title = {An Introduction to Mathematical Risk Theory}, publisher = {Huebner Foundation}, year = 1979, address = {Philadelphia}, language = {english} } @Article{Goulet:lossdist:2008, author = {Goulet, V. and Pigeon, M.}, title = {Statistical Modeling of Loss Distributions Using \pkg{actuar}}, journal = {R News}, year = 2008, volume = 8, number = 1, pages = {34-40}, month = {May}, url = {https://journal.r-project.org/articles/RN-2008-006/}, language = {english} } @Article{Goulet:simpf:2008, author = {Goulet, V. and Pouliot, L.-P.}, title = {Simulation of Compound Hierarchical Models in {R}}, journal = NAAJ, year = 2008, volume = 12, pages = {401-412}, language = {english} } @Article{Goulet_JAP, author = {Goulet, V.}, title = {Principles and application of credibility theory}, year = 1998, journal = {Journal of Actuarial Practice}, volume = 6, pages = {5-62}, language = {english} } @Article{Goulet_cfs, author = {Forgues, A. and Goulet, V. and Lu, J.}, title = {Credibility for severity revisited}, journal = NAAJ, year = 2006, volume = 10, number = 1, pages = {49-62}, language = {english} } @InProceedings{Hachemeister_75, author = {Hachemeister, C. 
A.}, title = {Credibility for Regression Models with Application to Trend}, year = 1975, booktitle = {Credibility, theory and applications}, series = {Proceedings of the berkeley Actuarial Research Conference on Credibility}, pages = {129-163}, publisher = {Academic Press}, address = {New York}, language = {english} } @Book{HoggKlugman, author = {Hogg, R. V. and Klugman, S. A.}, title = {Loss Distributions}, publisher = {Wiley}, year = 1984, address = {New York}, isbn = {0-4718792-9-0}, language = {english} } @Article{Holla:PIG:1966, author = {Holla, M. S.}, title = {On a {P}oisson-Inverse {G}aussian Distribution}, journal = {Metrika}, year = 1966, volume = 15, pages = {377-384}, language = {english} } @Article{Jewell_75, author = {Jewell, W. S.}, title = {The use of collateral data in credibility theory: a hierarchical model}, year = 1975, journal = {Giornale dell'Istituto Italiano degli Attuari}, volume = 38, pages = {1-16}, language = {english} } @Book{Johnson:discrete:2005, author = {Johnson, N. L. and Kemp, A. W. and Kotz, S.}, title = {Univariate Discrete Distributions}, publisher = {Wiley}, year = 2005, edition = 3, isbn = {978-047127246-5}, language = {english} } @Book{LivreVert, author = {Goovaerts, M. J. and Hoogstad, W. J.}, title = {Credibility theory}, series = {Surveys of actuarial studies}, number = 4, year = 1987, publisher = {Nationale-Nederlanden N.V.}, address = {Netherlands}, language = {english} } @Book{LossModels, author = {Klugman, S. A. and Panjer, H. H. and Willmot, G.}, title = {Loss Models: From Data to Decisions}, publisher = {Wiley}, year = 1998, address = {New York}, isbn = {0-4712388-4-8}, language = {english} } @Book{LossModels2e, author = {Klugman, S. A. and Panjer, H. H. and Willmot, G.}, title = {Loss Models: From Data to Decisions}, edition = 2, publisher = {Wiley}, year = 2004, address = {New York}, isbn = {0-4712157-7-5}, language = {english} } @Book{LossModels3e, author = {Klugman, S. A. and Panjer, H. H. 
and Willmot, G.}, title = {Loss Models: From Data to Decisions}, edition = 3, publisher = {Wiley}, year = 2008, address = {New York}, isbn = {978-0-4701878-1-4}, language = {english} } @Book{LossModels4e, author = {Klugman, S. A. and Panjer, H. H. and Willmot, G.}, title = {Loss Models: From Data to Decisions}, edition = 4, publisher = {Wiley}, year = 2012, address = {New York}, isbn = {978-1-118-31532-3}, language = {english} } @Book{MART, author = {Kaas, R. and Goovaerts, M. and Dhaene, J. and Denuit, M.}, title = {Modern actuarial risk theory}, publisher = {Kluwer {A}cademic {P}ublishers}, year = 2001, address = {Dordrecht}, isbn = {0-7923763-6-6}, language = {english} } @Book{MART:2e, author = {Kaas, R. and Goovaerts, M. and Dhaene, J. and Denuit, M.}, title = {Modern Actuarial Risk Theory. Using {R}}, edition = 2, publisher = {Springer}, year = 2008, isbn = {978-3-54070992-3}, language = {english} } @Book{MASS, author = {Venables, W. N. and Ripley, B. D.}, title = {Modern applied statistics with {S}}, publisher = {Springer}, year = 2002, edition = 4, address = {New York}, isbn = {0-3879545-7-0}, language = {english} } @Manual{Matrix, title = {Matrix: Sparse and Dense Matrix Classes and Methods}, author = {Bates, D. and Maechler, M. and Jagan, M.}, year = 2025, note = {R package version 1.7-4}, url = {https://cran.r-project.org/package=Matrix}, doi = {10.32614/cran.package.Matrix}, language = {english} } @Book{Neuts_81, author = {Neuts, M. F.}, title = {Matrix-geometric solutions in stochastic models: an algorithmic approach}, publisher = {Dover Publications}, year = 1981, isbn = {978-0-4866834-2-3}, language = {english} } @Unpublished{Ohlsson, author = {Ohlsson, E.}, title = {Simplified estimation of structure parameters in hierarchical credibility}, year = 2005, note = {Presented at the Zurich ASTIN Colloquium}, language = {english} } @Article{Panjer_81, author = {Panjer, H. 
H.}, title = {Recursive evaluation of a family of compound distributions}, journal = AB, year = 1981, volume = 12, pages = {22-26}, language = {english} } @Manual{R-exts, title = {Writing {R} Extensions}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = 2025, url = {https://cran.r-project.org/doc/manuals/R-exts.html}, language = {english} } @Article{Scollnik:2001:MCMC, author = {Scollnik, D. P. M.}, title = {Actuarial Modeling with {MCMC} and {BUGS}}, journal = {North American Actuarial Journal}, year = 2001, volume = 5, number = 2, pages = {96-124}, language = {english} } @Article{Shaban:PIG:1981, author = {Shaban, S. A.}, title = {Computation of the {P}oisson-inverse {G}aussian distribution}, journal = {Communications in Statistics -- Theory and Methods}, year = 1981, volume = 10, number = 14, pages = {1389-1399}, language = {english} } @Manual{SuppDists, title = {SuppDists: Supplementary distributions}, author = {Wheeler, B.}, year = 2025, note = {R package version 1.1-9.9}, url = {https://cran.R-project.org/package=SuppDists}, doi = {10.32614/cran.package.SuppDists}, language = {english} } @Book{Thomopoulos:2013:simulation, author = {Thomopoulos, N. T.}, title = {Essentials of Monte Carlo simulation: Statistical methods for building simulation models}, publisher = {Springer}, year = 2013, isbn = {978-146146022-0}, language = {english} } @Article{actuar, author = {Dutang, C and Goulet, V. and Pigeon, M.}, title = {\pkg{actuar}: An {R} Package for Actuarial Science}, journal = {Journal of Statistical Software}, year = 2008, volume = 25, number = 7, url = {https://doi.org/10.18637/jss.v025.i07}, doi = {10.18637/jss.v025.i07}, language = {english} } @Article{cm, author = {Belhadj, H. and Goulet, V. 
and Ouellet, T.}, title = {On Parameter Estimation in Hierarchical Credibility}, journal = AB, year = 2009, volume = 39, number = 2, language = {english} } @Manual{expint, title = {expint: Exponential Integral and Incomplete Gamma Function}, author = {Goulet, V.}, year = {2026}, note = {R package version 0.2-1}, doi = {10.32614/cran.package.expint}, url = {https://cran.r-project.org/package=expint}, language = {english} } @Article{statmod, author = {Giner, G. and Smyth, G. K.}, title = {\pkg{statmod}: {P}robability calculations for the inverse gaussian distribution}, journal = {{R Journal}}, year = 2016, volume = 8, number = 1, pages = {339-351}, doi = {10.32614/RJ-2016-024}, language = {english} } actuar/vignettes/distributions.Rnw0000644000176200001440000020402315151206331017131 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Additional continuous and discrete distributions} %\VignettePackage{actuar} %\SweaveUTF8 \title{Inventory of continuous and discrete distributions in \pkg{actuar}} \author{Jérémy Déraspe \\ Université Laval \\[2ex] Christophe Dutang \\ Université Paris Dauphine \\[2ex] Vincent Goulet \\ Université Laval \\[2ex] Nicholas Langevin \\ Université Laval \\[2ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} %% Compact, sans label itemize environment for the appendices. \setlist[itemize]{label={},leftmargin=0pt,align=left,nosep,midpenalty=10000} \begin{document} \maketitle \section{Introduction} \label{sec:introduction} R includes functions to compute the probability density function (pdf) or the probability mass function (pmf), the cumulative distribution function (cdf) and the quantile function, as well as functions to generate variates from a fair number of continuous and discrete distributions. For some root \code{foo}, the support functions are named \code{dfoo}, \code{pfoo}, \code{qfoo} and \code{rfoo}, respectively. 
Package \pkg{actuar} provides \code{d}, \code{p}, \code{q} and \code{r} functions for a large number of continuous size distributions useful for loss severity modeling; for phase-type distributions used in computation of ruin probabilities; for zero-truncated and zero-modified extensions of the discrete distributions commonly used in loss frequency modeling; for the heavy tailed Poisson-inverse Gaussian discrete distribution. The package also introduces support functions to compute raw moments, limited moments and the moment generating function (when it exists) of continuous distributions. \section{Additional continuous size distributions} \label{sec:continuous} The package provides support functions for all the probability distributions found in Appendix~A of \citet{LossModels4e} and not already present in base R, excluding the log-$t$, but including the loggamma distribution \citep{HoggKlugman}, as well as for the Feller--Pareto distribution and related Pareto distributions with a location parameter \citep{Arnold:pareto:2ed}. These distributions mostly fall under the umbrella of extreme value or heavy tailed distributions. \autoref{tab:continuous} lists the distributions supported by \pkg{actuar} along with the root names of the R functions. \autoref{app:continuous} details the formulas implemented and the name of the argument corresponding to each parameter. By default, all functions (except those for the Pareto distribution) use a rate parameter equal to the inverse of the scale parameter. This differs from \citet{LossModels4e} but is better in line with the functions for the gamma, exponential and Weibull distributions in base R. 
\begin{table} \centering \begin{tabular}{lll} \toprule Family & Distribution & Root \\ \midrule Feller--Pareto & Feller--Pareto & \code{fpareto} \\ & Pareto IV & \code{pareto4} \\ & Pareto III & \code{pareto3} \\ & Pareto II & \code{pareto2} \\ & Transformed beta & \code{trbeta} \\ & Burr & \code{burr} \\ & Loglogistic & \code{llogis} \\ & Paralogistic & \code{paralogis} \\ & Generalized Pareto & \code{genpareto} \\ & Pareto & \code{pareto} \\ & Single-parameter Pareto & \code{pareto1} \\ & Inverse Burr & \code{invburr} \\ & Inverse Pareto & \code{invpareto} \\ & Inverse paralogistic & \code{invparalogis} \\ \midrule Transformed gamma & Transformed gamma & \code{trgamma} \\ & Inverse transformed gamma & \code{invtrgamma} \\ & Inverse gamma & \code{invgamma} \\ & Inverse Weibull & \code{invweibull} \\ & Inverse exponential & \code{invexp} \\ \midrule Other & Loggamma & \code{lgamma} \\ & Gumbel & \code{gumbel} \\ & Inverse Gaussian & \code{invgauss} \\ & Generalized beta & \code{genbeta} \\ \bottomrule \end{tabular} \caption{Probability distributions supported by \pkg{actuar} classified by family and root names of the R functions.} \label{tab:continuous} \end{table} We mostly use the nomenclature of \citet{LossModels4e} to classify the continuous distributions supported by \pkg{actuar}. However, following \citet{Arnold:pareto:2ed}, we regroup distributions of the transformed beta family and variants of the Pareto distribution inside the larger Feller--Pareto family of distributions. \autoref{fig:diagram:fp-family} shows the relationships between the distributions of the Feller--Pareto and transformed beta families. \autoref{fig:diagram:trgamma-family} does the same for the distributions of the transformed gamma and inverse transformed gamma families. 
\begin{figure} \centering \setlength{\unitlength}{0.7cm} \begin{picture}(16.9,10.75)(-0.7,-0.4) \small % Flèches \put(8,6){\vector(2,-1){3.7}} % trbeta -> invburr \put(13,4.2){\vector(0,1){0.95}} % invburr -> invparalogis \put(11.7,3.1){\line(-1,-1){1}} \put(10.7,2.1){\line(-1,0){7.7}} \put(3,2.1){\vector(-1,-1){1.1}} % invburr -> llogis \put(13,3){\vector(0,-1){2}} % invburr -> invpareto \put(2.05,3.1){\vector(2,-1){4.2}} % burr -> pareto \put(1,3){\vector(0,-1){2}} % burr -> llogis \put(6,6){\vector(-2,-1){3.85}} % trbeta -> burr \put(1,4.2){\vector(0,1){0.95}} % burr -> paralogis \put(7,6){\vector(0,-1){1.8}} % trbeta -> genpareto \put(7,9){\vector(0,-1){1.8}} % fpareto -> trbeta \put(7,3){\vector(0,-1){2}} % genpareto -> pareto \put(8,3){\vector(2,-1){4}} % genpareto -> invpareto % \put(6,9){\vector(-2,-1){3.3}} % fpareto -> pareto3 % \put(8,9){\vector(2,-1){3.3}} % fpareto -> pareto1 \put(1,9){\vector(0,-1){1.1}} % pareto4 -> pareto3 \put(13,9){\vector(0,-1){1.1}} % pareto2 -> pareto1 \put(4.5,9.6){\vector(-1,0){1.75}} % fpareto -> pareto4 \put(9.5,9.6){\vector(1,0){1.75}} % fpareto -> pareto2 \put(14.7,9.6){\line(1,0){1.5}} % pareto2 -> pareto \put(16.2,9.6){\line(0,-1){10}} \put(16.2,-0.4){\line(-1,0){7.5}} \put(8.7,-0.4){\vector(-2,1){0.72}} \put(14.8,9.62){\makebox(0,0.5)[l]{$\mu = 0$}} \put(7,9.65){\makebox(0,0.5)[c]{Feller-Pareto}} \put(7,9.1){\makebox(0,0.5)[c]{$\mu, \alpha, \gamma, \tau, \theta$}} \put(7,9.6){\oval(5,1.2)} \put(3.2,9.65){\makebox(0,0.5)[l]{$\tau = 1$}} \put(1,9.65){\makebox(0,0.5)[c]{Pareto IV}} \put(1,9.1){\makebox(0,0.5)[c]{$\mu, \alpha, \gamma, \theta$}} \put(1,9.6){\oval(3.4,1.2)} \put(9.8,9.05){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(9.8,9.65){\makebox(0,0.5)[l]{$\tau = 1$}} \put(13,9.65){\makebox(0,0.5)[c]{Pareto II}} \put(13,9.1){\makebox(0,0.5)[c]{$\mu,\alpha, \theta$}} \put(13,9.6){\oval(3.4,1.2)} \put(0.8,8.3){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,7.35){\makebox(0,0.5)[c]{Pareto III}} \put(1,6.8){\makebox(0,0.5)[c]{$\mu, 
\gamma, \theta$}} \put(1,7.3){\oval(3.4,1.2)} \put(13.2,8.3){\makebox(0,0.5)[l]{$\mu = \theta$}} \put(13,7.35){\makebox(0,0.5)[c]{Pareto I}} \put(13,6.8){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(13,7.3){\oval(3.4,1.2)} \put(7.2,7.9){\makebox(0,0.5)[l]{$\mu = 0$}} \put(7,6.65){\makebox(0,0.5)[c]{Transformed beta}} \put(7,6.1){\makebox(0,0.5)[c]{$\alpha, \gamma, \tau, \theta$}} \put(7,6.6){\oval(5,1.2)} \put(9.2,5.4){\rotatebox{-26.6}{\makebox(0,0.5)[l]{$\alpha = 1$}}} \put(13.20,3.65){\makebox(0,0.5)[c]{Inverse Burr}} \put(13.20,3.1){\makebox(0,0.5)[c]{$\gamma, \tau, \theta$}} \put(13.20,3.6){\oval(3.4,1.2)} \put(13.2,4.3){\makebox(0,0.5)[l]{$\gamma = \tau$}} \put(13.20,5.80){\makebox(0,0.5)[c]{Inverse paralogistic}} \put(13.20,5.25){\makebox(0,0.5)[c]{$\tau, \theta$}} \put(13.20,5.75){\oval(5.4,1.2)} \put(13.2,1.9){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(13.20,0.45){\makebox(0,0.5)[c]{Inverse Pareto}} \put(13.20,-0.1){\makebox(0,0.5)[c]{$\tau, \theta$}} \put(13.20,0.4){\oval(3.9,1.2)} \put(7.2,4.9){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(7,3.65){\makebox(0,0.5)[c]{Generalized Pareto}} \put(7,3.1){\makebox(0,0.5)[c]{$\alpha, \tau, \theta$}} \put(7,3.6){\oval(4.9,1.2)} \put(7.2,1.25){\makebox(0,0.5)[l]{$\tau = 1$}} \put(7,0.45){\makebox(0,0.5)[c]{Pareto}} \put(7,-0.1){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(7,0.4){\oval(2.2,1.2)} \put(4.5,5.4){\rotatebox{26.6}{\makebox(0,0.5)[r]{$\tau = 1$}}} \put(1,3.65){\makebox(0,0.5)[c]{Burr}} \put(1,3.1){\makebox(0,0.5)[c]{$\alpha, \gamma, \theta$}} \put(1,3.6){\oval(2.5,1.2)} \put(0.8,4.3){\makebox(0,0.5)[r]{$\gamma = \alpha$}} \put(1,5.80){\makebox(0,0.5)[c]{Paralogistic}} \put(1,5.25){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(1,5.75){\oval(3.4,1.2)} \put(0.8,1.9){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,0.45){\makebox(0,0.5)[c]{Loglogistic}} \put(1,-0.1){\makebox(0,0.5)[c]{$\gamma, \theta$}} \put(1,0.4){\oval(3.4,1.2)} \put(9.8,2.1){\rotatebox{-26.6}{\makebox(0,0.5)[r]{$\alpha = 1$}}} 
\put(4.0,2.1){\rotatebox{-26.6}{\makebox(0,0.5)[r]{$\gamma = 1$}}} \put(11.25,3.0){\rotatebox{45}{\makebox(0,0.5)[r]{$\tau = 1$}}} \end{picture} \caption{Interrelations between distributions of the Feller--Pareto family. This diagram is an extension of Figure~5.2 of \citet{LossModels4e}.} \label{fig:diagram:fp-family} \end{figure} \begin{figure} \setlength{\unitlength}{0.7cm} \begin{picture}(7.5,5.2)(-0.25,0) \small % Flèches \put(4,4){\vector(2,-1){1.55}} % trgamma -> weibull \put(5.55,2){\vector(-2,-1){1.55}} % weibull -> exp \put(1.55,2){\vector(2,-1){1.55}} % gamma -> exp \put(3,4){\vector(-2,-1){1.55}} % trgamma -> gamma \put(3.5,4.65){\makebox(0,0.5)[c]{Transformed gamma}} \put(3.5,4.1){\makebox(0,0.5)[c]{$\alpha, \tau, \lambda$}} \put(3.5,4.6){\oval(5.5,1.2)} \put(5.4,3.45){\makebox(0,0.5)[l]{$\alpha = 1$}} \put(6,2.65){\makebox(0,0.5)[c]{Weibull}} \put(6,2.1){\makebox(0,0.5)[c]{$\tau, \lambda$}} \put(6,2.6){\oval(2.5,1.2)} \put(5.4,1.35){\makebox(0,0.5)[l]{$\tau = 1$}} \put(3.5,0.65){\makebox(0,0.5)[c]{Exponential}} \put(3.5,0.1){\makebox(0,0.5)[c]{$\lambda$}} \put(3.5,0.6){\oval(3.5,1.2)} \put(1.6,1.35){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,2.65){\makebox(0,0.5)[c]{Gamma}} \put(1,2.1){\makebox(0,0.5)[c]{$\alpha, \lambda$}} \put(1,2.6){\oval(2.5,1.2)} \put(1.6,3.45){\makebox(0,0.5)[r]{$\tau = 1$}} \end{picture} \hfill \begin{picture}(8.75,5.2)(-0.875,0) \small % Flèches \put(4,4){\vector(2,-1){1.55}} % trgamma -> weibull \put(5.55,2){\vector(-2,-1){1.55}} % weibull -> exp \put(1.55,2){\vector(2,-1){1.55}} % gamma -> exp \put(3,4){\vector(-2,-1){1.55}} % trgamma -> gamma \put(3.5,4.65){\makebox(0,0.5)[c]{Inverse transformed gamma}} \put(3.5,4.1){\makebox(0,0.5)[c]{$\alpha, \tau, \lambda$}} \put(3.5,4.6){\oval(7,1.2)} \put(5.4,3.45){\makebox(0,0.5)[l]{$\alpha = 1$}} \put(6,2.65){\makebox(0,0.5)[c]{Inverse Weibull}} \put(6,2.1){\makebox(0,0.5)[c]{$\tau, \lambda$}} \put(6,2.6){\oval(4,1.2)} \put(5.4,1.35){\makebox(0,0.5)[l]{$\tau = 1$}} 
\put(3.5,0.65){\makebox(0,0.5)[c]{Inverse exponential}} \put(3.5,0.1){\makebox(0,0.5)[c]{$\lambda$}} \put(3.5,0.6){\oval(5,1.2)} \put(1.6,1.35){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,2.65){\makebox(0,0.5)[c]{Inverse gamma}} \put(1,2.1){\makebox(0,0.5)[c]{$\alpha, \lambda$}} \put(1,2.6){\oval(4,1.2)} \put(1.6,3.45){\makebox(0,0.5)[r]{$\tau = 1$}} \end{picture} \caption{Interrelations between distributions of the transformed gamma and inverse transformed gamma families. Diagrams derived from Figure~5.3 of \citet{LossModels4e}.} \label{fig:diagram:trgamma-family} \end{figure} In addition to the \code{d}, \code{p}, \code{q} and \code{r} functions, \pkg{actuar} introduces \code{m}, \code{lev} and \code{mgf} functions to compute, respectively, the theoretical raw moments \begin{equation*} m_k = \E{X^k}, \end{equation*} the theoretical limited moments \begin{equation*} \E{(X \wedge x)^k} = \E{\min(X, x)^k} \end{equation*} and the moment generating function \begin{equation*} M_X(t) = \E{e^{tX}}, \end{equation*} when it exists. Every distribution of \autoref{tab:continuous} is supported, along with the following distributions of base R: beta, exponential, chi-square, gamma, lognormal, normal (no \code{lev}), uniform and Weibull. The \code{m} and \code{lev} functions are especially useful for estimation methods based on the matching of raw or limited moments; see the \code{lossdist} vignette for their empirical counterparts. The \code{mgf} functions come in handy to compute the adjustment coefficient in ruin theory; see the \code{risk} vignette. \section{Phase-type distributions} \label{sec:phase-type} In addition to the 19 distributions of \autoref{tab:continuous}, the package provides support for a family of distributions deserving a separate presentation. Phase-type distributions \citep{Neuts_81} are defined as the distribution of the time until absorption of continuous time, finite state Markov processes with $m$ transient states and one absorbing state. 
Let \begin{equation} \label{eq:Markov-transition-matrix} \mat{Q} = \begin{bmatrix} \mat{T} & \mat{t} \\ \mat{0} & 0 \end{bmatrix} \end{equation} be the transition rates matrix (or intensity matrix) of such a process and let $(\mat{\pi}, \pi_{m + 1})$ be the initial probability vector. Here, $\mat{T}$ is an $m \times m$ non-singular matrix with $t_{ii} < 0$ for $i = 1, \dots, m$ and $t_{ij} \geq 0$ for $i \neq j$, $\mat{t} = - \mat{T} \mat{e}$ and $\mat{e}$ is a column vector with all components equal to 1. Then the cdf of the time until absorption random variable with parameters $\mat{\pi}$ and $\mat{T}$ is \begin{equation} \label{eq:cdf-phtype} F(x) = \begin{cases} \pi_{m + 1}, & x = 0, \\ 1 - \mat{\pi} e^{\mat{T} x} \mat{e}, & x > 0, \end{cases} \end{equation} where \begin{equation} \label{eq:matrix-exponential} e^{\mat{M}} = \sum_{n = 0}^\infty \frac{\mat{M}^n}{n!} \end{equation} is the matrix exponential of matrix $\mat{M}$. The exponential distribution, the Erlang (gamma with integer shape parameter) and discrete mixtures thereof are common special cases of phase-type distributions. The package provides \code{d}, \code{p}, \code{r}, \code{m} and \code{mgf} functions for phase-type distributions. The root is \code{phtype} and parameters $\mat{\pi}$ and $\mat{T}$ are named \code{prob} and \code{rates}, respectively; see also \autoref{app:phase-type}. For the package, function \code{pphtype} is central to the evaluation of the ruin probabilities; see \code{?ruin} and the \code{risk} vignette. \section{Extensions to standard discrete distributions} \label{sec:discrete} The package introduces support functions for additional counting distributions commonly used in loss frequency modeling. A counting distribution is a discrete distribution defined on the non-negative integers $0, 1, 2, \dots$. Let $N$ be the counting random variable. We denote $p_k$ the probability that the random variable $N$ takes the value $k$, that is: \begin{equation*} p_k = \Pr[N = k]. 
\end{equation*} \citet{LossModels4e} classify counting distributions in two main classes. First, a discrete random variable is a member of the $(a, b, 0)$ class of distributions if there exist constants $a$ and $b$ such that \begin{equation*} \frac{p_k}{p_{k - 1}} = a + \frac{b}{k}, \quad k = 1, 2, \dots. \end{equation*} The probability at zero, $p_0$, is set such that $\sum_{k = 0}^\infty p_k = 1$. The members of this class are the Poisson, the binomial, the negative binomial and its special case, the geometric. These distributions are all well supported in base R with \code{d}, \code{p}, \code{q} and \code{r} functions. The second class of distributions is the $(a, b, 1)$ class. A discrete random variable is a member of the $(a, b, 1)$ class of distributions if there exist constants $a$ and $b$ such that \begin{equation*} \frac{p_k}{p_{k - 1}} = a + \frac{b}{k}, \quad k = 2, 3, \dots. \end{equation*} One will note that recursion starts at $k = 2$ for the $(a, b, 1)$ class. Therefore, the probability at zero can be any arbitrary number $0 \leq p_0 \leq 1$. Setting $p_0 = 0$ defines a subclass of so-called \emph{zero-truncated} distributions. The members of this subclass are the zero-truncated Poisson, the zero-truncated binomial, the zero-truncated negative binomial and the zero-truncated geometric. Let $p_k^T$ denote the probability mass in $k$ for a zero-truncated distribution. As above, $p_k$ denotes the probability mass for the corresponding member of the $(a, b, 0)$ class. We have \begin{equation*} p_k^T = \begin{cases} 0, & k = 0 \\ \displaystyle\frac{p_k}{1 - p_0}, & k = 1, 2, \dots. \end{cases} \end{equation*} Moreover, let $P(k)$ denote the cumulative distribution function of a member of the $(a, b, 0)$ class. Then the cdf $P^T(k)$ of the corresponding zero-truncated distribution is \begin{equation*} P^T(k) = \frac{P(k) - P(0)}{1 - P(0)} = \frac{P(k) - p_0}{1 - p_0} \end{equation*} for all $k = 0, 1, 2, \dots$. 
Alternatively, the survival function $\bar{P}^T(k) = 1 - P^T(k)$ is \begin{equation*} \bar{P}^T(k) = \frac{\bar{P}(k)}{\bar{P}(0)} = \frac{\bar{P}(k)}{1 - p_0}. \end{equation*} Finally, let $\kappa_n$ denote the $n$\textsuperscript{th} cumulant of a distribution. The first three cumulants of a zero-truncated distribution are \begin{equation} \label{eq:cumulants} \begin{split} \kappa_1^T &= c \kappa_1 \\ \kappa_2^T &= c \kappa_2 + (1 - c) c \kappa_1^2 \\ \kappa_3^T &= c m_3 - 3 c^2 m_1 m_2 + 2 c^3 m_1^3, \end{split} \end{equation} where $m_k$ is the $k$\textsuperscript{th} raw moment as defined in \autoref{sec:continuous}, and with \begin{equation*} c = \frac{1}{1 - p_0}. \end{equation*} Package \pkg{actuar} provides \code{d}, \code{p}, \code{q} and \code{r} functions for all the zero-truncated distributions mentioned above. \autoref{tab:discrete} lists the root names of the functions; see \autoref{app:discrete} for additional details. \begin{table} \centering \begin{tabular}{ll} \toprule Distribution & Root \\ \midrule Zero-truncated Poisson & \code{ztpois} \\ Zero-truncated binomial & \code{ztbinom} \\ Zero-truncated negative binomial & \code{ztnbinom} \\ Zero-truncated geometric & \code{ztgeom} \\ Logarithmic & \code{logarithmic} \\ \addlinespace[6pt] Zero-modified Poisson & \code{zmpois} \\ Zero-modified binomial & \code{zmbinom} \\ Zero-modified negative binomial & \code{zmnbinom} \\ Zero-modified geometric & \code{zmgeom} \\ Zero-modified logarithmic & \code{zmlogarithmic} \\ \bottomrule \end{tabular} \caption{Members of the $(a, b, 1)$ class of discrete distributions supported by \pkg{actuar} and root names of the R functions.} \label{tab:discrete} \end{table} An entry of \autoref*{tab:discrete} deserves a few additional words. The logarithmic (or log-series) distribution with parameter $\theta$ has pmf \begin{equation*} p_k = \frac{a \theta^k}{k}, \quad k = 1, 2, \dots, \end{equation*} with $a = -1/\log(1 - \theta)$ and for $0 \leq \theta < 1$. 
This is the standard parametrization in the literature \citep{Johnson:discrete:2005}. The logarithmic distribution is always defined on the strictly positive integers. As such, it is not qualified as ``zero-truncated'', but it nevertheless belongs to the $(a, b, 1)$ class of distributions, more specifically to the subclass with $p_0 = 0$. Actually, the logarithmic distribution is the limiting case of the zero-truncated negative binomial distribution with size parameter equal to zero and $\theta = 1 - p$, where $p$ is the probability of success for the zero-truncated negative binomial. Note that this differs from the presentation in \citet{LossModels4e}. Another subclass of the $(a, b, 1)$ class of distributions is obtained by setting $p_0$ to some arbitrary number $p_0^M$ subject to $0 < p_0^M \leq 1$. The members of this subclass are called \emph{zero-modified} distributions. Zero-modified distributions are discrete mixtures between a degenerate distribution at zero and the corresponding distribution from the $(a, b, 0)$ class. Let $p_k^M$ and $P^M(k)$ denote the pmf and cdf of a zero-modified distribution. Written as a mixture, the pmf is \begin{equation} \label{eq:mixture} p_k^M = \left(1 - \frac{1 - p_0^M}{1 - p_0} \right) \mathbb{1}_{\{k = 0\}} + \frac{1 - p_0^M}{1 - p_0}\, p_k. \end{equation} Alternatively, we have \begin{equation*} p_k^M = \begin{cases} p_0^M, & k = 0 \\ \displaystyle\frac{1 - p_0^M}{1 - p_0}\, p_k, & k = 1, 2, \dots \end{cases} \end{equation*} and \begin{align*} P^M(k) &= p_0^M + (1 - p_0^M) \frac{P(k) - P(0)}{1 - P(0)} \\ &= p_0^M + \frac{1 - p_0^M}{1 - p_0}\, (P(k) - p_0) \\ &= p_0^M + (1 - p_0^M)\, P^T(k) \end{align*} for all $k = 0, 1, 2, \dots$. The survival function is \begin{equation*} \bar{P}^M(k) = (1 - p_0^M)\, \frac{\bar{P}(k)}{\bar{P}(0)} = \frac{1 - p_0^M}{1 - p_0}\, \bar{P}(k) = (1 - p_0^M)\, \bar{P}^T(k). 
\end{equation*} Therefore, we can also write the pmf of a zero-modified distribution as a mixture of a degenerate distribution at zero and the corresponding zero-truncated distribution: \begin{equation} \label{eq:mixture:alt} p_k^M = p_0^M \mathbb{1}_{\{k = 0\}} + (1 - p_0^M)\, p_k^T. \end{equation} The first three cumulants $\kappa_1^M$, $\kappa_2^M$, $\kappa_3^M$ of a zero-modified distribution are given by the corresponding right hand sides of \eqref{eq:cumulants} with \begin{equation} \label{eq:c-in-cumulants-zm} c = \frac{1 - p_0^M}{1 - p_0}. \end{equation} The members of the subclass are the zero-modified Poisson, zero-modified binomial, zero-modified negative binomial and zero-modified geometric, together with the zero-modified logarithmic as a limiting case of the zero-modified negative binomial. \autoref{tab:discrete} lists the root names of the support functions provided in \pkg{actuar}; see also \autoref{app:discrete}. Clearly, zero-truncated distributions are zero-modified distributions with $p_0^M = 0$. However, using the dedicated functions in R will be more efficient. \section{Poisson-inverse Gaussian distribution} \label{sec:pig} The Poisson-inverse Gaussian (PIG) distribution results from the continuous mixture between a Poisson distribution and an inverse Gaussian. That is, the Poisson-inverse Gaussian is the (marginal) distribution of the random variable $X$ when the conditional random variable $X|\Lambda = \lambda$ is Poisson with parameter $\lambda$ and the random variable $\Lambda$ is inverse Gaussian with parameters $\mu$ and $\phi$. The literature proposes many different expressions for the pmf of the PIG \citep{Holla:PIG:1966,Shaban:PIG:1981,Johnson:discrete:2005,LossModels4e}. 
Using the parametrization for the inverse Gaussian found in \autoref{app:continuous}, we have: \begin{equation} \label{eq:pig:px} \begin{split} p_x &= \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi \left( 1 + \frac{1}{2\phi\mu^2} \right)} \right)^{-\left( x - \frac{1}{2} \right)} \\ &\phantom{=} \times K_{x - \frac{1}{2}} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right), \end{split} \end{equation} for $x = 0, 1, \dots$, $\mu > 0$, $\phi > 0$ and where \begin{equation} \label{eq:bessel_k} K_\nu(az) = \frac{a^{-\nu}}{2} \int_0^\infty t^{\nu - 1} e^{- z(t + a^2 t^{-1})/2} dt, \quad a^2 z > 0 \end{equation} is the modified Bessel function of the third kind \citep{Bateman:1953:2,Abramowitz:1972}. One may compute the probabilities $p_x$, $x = 0, 1, \dots$ recursively using the following equations: \begin{equation} \label{eq:pig:px:recursive} \begin{split} p_0 &= \exp\left\{ \frac{1}{\phi\mu} \left(1 - \sqrt{1 + 2\phi\mu^2}\right) \right\} \\ p_1 &= \frac{\mu}{\sqrt{1 + 2\phi\mu^2}}\, p_0 \\ p_x &= \frac{2\phi\mu^2}{1 + 2\phi\mu^2} \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{\mu^2}{1 + 2\phi\mu^2} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{split} \end{equation} The first moment of the distribution is $\mu$. The second and third central moments are, respectively, \begin{align*} \mu_2 &= \sigma^2 = \mu + \phi\mu^3 \\ \mu_3 &= \mu + 3 \phi \mu^2 \sigma^2. \end{align*} For the limiting case $\mu = \infty$, the underlying inverse Gaussian has an inverse chi-squared distribution. The latter has no finite strictly positive, integer moments and, consequently, neither does the Poisson-inverse Gaussian. See \autoref{app:discrete:pig} for the formulas in this case. \section{Special integrals} \label{sec:special-integrals} Many of the cumulative distribution functions of \autoref{app:continuous} are expressed in terms of the incomplete gamma function or the incomplete beta function. 
From a probability theory perspective, the incomplete gamma function is usually defined as \begin{equation} \label{eq:pgamma} \Gamma(\alpha; x) = \frac{1}{\Gamma(\alpha)} \int_0^x t^{\alpha - 1} e^{-t}\, dt, \quad \alpha > 0, x > 0, \end{equation} with \begin{equation*} \Gamma(\alpha) = \int_0^\infty t^{\alpha - 1} e^{-t}\, dt, \end{equation*} whereas the (regularized) incomplete beta function is defined as \begin{equation} \label{eq:pbeta} \beta(a, b; x) = \frac{1}{\beta(a, b)} \int\limits_0^x t^{a - 1} (1 - t)^{b - 1}\, dt, \quad a > 0, b > 0, 0 < x < 1, \end{equation} with \begin{equation*} \beta(a, b) = \int_0^1 t^{a - 1} (1 - t)^{b - 1}\, dt = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a + b)}. \end{equation*} Now, there exist alternative definitions of these functions that are valid for negative values of the parameters. \citet{LossModels4e} introduce them to extend the range of admissible values for limited expected value functions. First, following \citet[Section~6.5]{Abramowitz:1972}, we define the ``extended'' incomplete gamma function as \begin{equation} \label{eq:gammainc} G(\alpha; x) = \int_x^\infty t^{\alpha - 1} e^{-t}\, dt \end{equation} for $\alpha$ real and $x > 0$. When $\alpha > 0$, we clearly have \begin{equation} \label{eq:gammainc:apos} G(\alpha; x) = \Gamma(\alpha) [1 - \Gamma(\alpha; x)]. \end{equation} The integral is also defined for $\alpha \le 0$. As outlined in \citet[Appendix~A]{LossModels4e}, integration by parts of \eqref{eq:gammainc} yields the relation \begin{equation*} G(\alpha; x) = -\frac{x^\alpha e^{-x}}{\alpha} + \frac{1}{\alpha} G(\alpha + 1; x). \end{equation*} This process can be repeated until $\alpha + k$ is a positive number, in which case the right hand side can be evaluated with \eqref{eq:gammainc:apos}. 
If $\alpha = 0, -1, -2, \dots$, this calculation requires the value of \begin{equation*} \label{eq:expint} G(0; x) = \int_x^\infty \frac{e^{-t}}{t}\, dt = E_1(x), \end{equation*} which is known in the literature as the \emph{exponential integral} \citep[Section~5.1]{Abramowitz:1972}. Second, as seen in \citet[Section~6.6]{Abramowitz:1972}, we have the following relation for the integral on the right hand side of \eqref{eq:pbeta}: \begin{equation*} \int\limits_0^x t^{a - 1} (1 - t)^{b - 1}\, dt = \frac{x^a}{a}\, F(a, 1 - b; a + 1; x), \end{equation*} where \begin{equation*} F(a, b; c; z) = \frac{\Gamma(c)}{\Gamma(a) \Gamma(b)} \sum_{k = 0}^\infty \frac{\Gamma(a + k) \Gamma(b + k)}{\Gamma(c + k)} \frac{z^k}{k!} \end{equation*} is the Gauss hypergeometric series. With the above definition, the incomplete beta function also admits negative, non integer values for parameters $a$ and $b$. Now, let \begin{equation} \label{eq:betaint} B(a, b; x) = \Gamma(a + b) \int_0^x t^{a-1} (1-t)^{b-1} dt \end{equation} for $a > 0$, $b \neq -1, -2, \dots$ and $0 < x < 1$. Again, it is clear that when $b > 0$, \begin{equation*} B(a, b; x) = \Gamma(a) \Gamma(b) \beta(a, b; x). \end{equation*} Of more interest here is the case where $b < 0$, $b \neq -1, -2, \dots$ and $a > 1 + \lfloor -b\rfloor$. Integration by parts of \eqref{eq:betaint} yields \begin{equation} \label{eq:betaint:2} \begin{split} B(a, b; x) &= \displaystyle -\Gamma(a + b) \left[ \frac{x^{a-1} (1-x)^b}{b} + \frac{(a-1) x^{a-2} (1-x)^{b+1}}{b (b+1)} \right. \\ &\phantom{=} \displaystyle\left. + \cdots + \frac{(a-1) \cdots (a-r) x^{a-r-1} (1-x)^{b+r}}{b (b+1) \cdots (b+r)} \right] \\ &\phantom{=} \displaystyle + \frac{(a-1) \cdots (a-r-1)}{b (b+1) \cdots (b+r)} \Gamma(a-r-1) \\ &\phantom{=} \times \Gamma(b+r+1) \beta(a-r-1, b+r+1; x), \end{split} \end{equation} where $r = \lfloor -b\rfloor$. For the needs of \pkg{actuar}, we dubbed \eqref{eq:betaint} the \emph{beta integral}. 
Package \pkg{actuar} includes a C implementation of \eqref{eq:betaint:2} and imports functionalities of package \pkg{expint} \citep{expint} to compute the incomplete gamma function \eqref{eq:gammainc} at the C level. The routines are used to evaluate the limited expected value for distributions of the Feller--Pareto and transformed gamma families. \section{Package API: accessing the C routines} \label{sec:api} The actual workhorses behind the R functions presented in this document are C routines that the package exposes to other packages through an API. The header file \file{include/actuarAPI.h} in the package installation directory contains declarations for % the continuous distributions of \autoref{app:continuous}, % the phase-type distributions of \autoref{app:phase-type}, % the discrete distributions of \autoref{app:discrete}, % and the beta integral of \autoref{sec:special-integrals}. The prototypes of the C routines for probability distributions all follow the same pattern modeled after those of base R \citep[Chapter~6]{R-exts}. As an example, here are the prototypes for the Pareto distribution: \begin{Schunk} \begin{Sinput} double dpareto(double x, double shape, double scale, int give_log); double ppareto(double q, double shape, double scale, int lower_tail, int log_p); double qpareto(double p, double shape, double scale, int lower_tail, int log_p); double rpareto(double shape, double scale); double mpareto(double order, double shape, double scale, int give_log); double levpareto(double limit, double shape, double scale, double order, int give_log); \end{Sinput} \end{Schunk} For the beta integral \eqref{eq:betaint:2}, the frontend is a routine \code{betaint} that returns \code{NA} or \code{NaN} for out-of-range arguments, but actual computation is done by routine \code{betaint\_raw}. 
Both are exposed as follows in the API: \begin{Schunk} \begin{Sinput} double betaint(double x, double a, double b); double betaint_raw(double x, double a, double b, double x1m); \end{Sinput} \end{Schunk} The developer of some package \pkg{pkg} who wants to use a routine --- say \code{dpareto} --- in her code should proceed as follows. \begin{enumerate} \item Add \pkg{actuar} to the \code{Imports} and \code{LinkingTo} directives of the \file{DESCRIPTION} file of \pkg{pkg}; \item Add an entry \code{import(actuar)} in the \file{NAMESPACE} file of \pkg{pkg}; \item Define the routine with a call to \code{R\_GetCCallable} in the initialization routine \code{R\_init\_pkg} of \pkg{pkg} \citep[Section~5.4]{R-exts}. For the current example, the file \file{src/init.c} of \pkg{pkg} would contain the following code: \begin{Schunk} \begin{Sinput} void R_init_pkg(DllInfo *dll) { R_registerRoutines( /* native routine registration */ ); pkg_dpareto = (double(*)(double,double,double,int)) R_GetCCallable("actuar", "dpareto"); } \end{Sinput} \end{Schunk} \item Define a native routine interface that will call \code{dpareto}, say \code{pkg\_dpareto} to avoid any name clash, in \file{src/init.c} as follows: \begin{Schunk} \begin{Sinput} double(*pkg_dpareto)(double,double,double,int); \end{Sinput} \end{Schunk} \item Declare the routine in a header file of \pkg{pkg} with the keyword \code{extern} to expose the interface to all routines of the package. In our example, file \file{src/pkg.h} would contain: \begin{Schunk} \begin{Sinput} extern double(*pkg_dpareto)(double,double,double,int); \end{Sinput} \end{Schunk} \item Include the package header file \file{pkg.h} in any C file making use of routine \code{pkg\_dpareto}. \end{enumerate} The companion package \pkg{expint} \citep{expint} ships with a complete test package implementing the above. See the vignette of the latter package for more information. 
\section{Implementation details} \label{sec:implementation} The cdf of the continuous distributions of \autoref{tab:continuous} use \code{pbeta} and \code{pgamma} to compute the incomplete beta and incomplete gamma functions, respectively. Functions \code{dinvgauss}, \code{pinvgauss} and \code{qinvgauss} rely on C implementations of functions of the same name from package \pkg{statmod} \citep{statmod}. The matrix exponential C routine needed in \code{dphtype} and \code{pphtype} is based on \code{expm} from package \pkg{Matrix} \citep{Matrix}. The C code to compute the beta integral \eqref{eq:betaint:2} was written by the second author. For all but the trivial input values, the pmf, cdf and quantile functions for the zero-truncated and zero-modified distributions of \autoref{tab:discrete} use the internal R functions for the corresponding standard distribution. Generation of random variates from zero-truncated distributions uses the following simple inversion algorithm on a restricted range \citep{Dalgaard:r-help:2005,Thomopoulos:2013:simulation}. Let $u$ be a random number from a uniform distribution on $(p_0, 1)$. Then $x = P^{-1}(u)$ is distributed according to the zero-truncated version of the distribution with cdf $P(k)$. For zero-modified distributions, we generate variates from the discrete mixture \eqref{eq:mixture} when $p_0^M \geq p_0$. When $p_0^M < p_0$, we can use either of two methods: \begin{enumerate} \item the classical rejection method with an envelope that differs from the target distribution only at zero (meaning that only zeros are rejected); \item generation from the discrete mixture \eqref{eq:mixture:alt} with the corresponding zero-truncated distribution (hence using the inversion method on a restricted range explained above). 
\end{enumerate} Which approach is faster depends on the relative speeds of the standard random generation function and the standard quantile function, and also on the proportion of zeros that are rejected using the rejection algorithm. Based on the difference $p_0 - p_0^M$, we determined (empirically) distribution-specific cutoff points between the two methods. Finally, computation of the Poisson-inverse Gaussian pmf uses the recursive equations \eqref{eq:pig:px:recursive}. Versions of \pkg{actuar} prior to 3.0-0 used the direct expression \eqref{eq:pig:px} and the C level function \code{bessel\_k} part of the R API. However, the latter overflows for large values of $\nu$ and this caused \code{NaN} results for the value of \begin{equation*} \frac{B^{- \left(x - \frac{1}{2} \right)} K_{x - \frac{1}{2}}(B/\phi)}{x!} \end{equation*} and, therefore, for the Poisson-inverse Gaussian pmf. \appendix \section{Continuous distributions} \label{app:continuous} This appendix gives the root name and the parameters of the R support functions for the distributions of \autoref{tab:continuous}, as well as the formulas for the pdf, the cdf, the raw moment of order $k$ and the limited moment of order $k$ using the parametrization of \citet{LossModels4e} and \citet{HoggKlugman}. In the following, $\Gamma(\alpha; x)$ is the incomplete gamma function \eqref{eq:pgamma}, $\beta(a, b; x)$ is the incomplete beta function \eqref{eq:pbeta}, $G(\alpha; x)$ is the ``extended'' incomplete gamma function \eqref{eq:gammainc}, $B(a, b; x)$ is the beta integral \eqref{eq:betaint} and $K_\nu(x)$ is the modified Bessel function of the third kind \eqref{eq:bessel_k}. Unless otherwise stated, all parameters are finite and strictly positive, and the functions are defined for $x > 0$. 
\subsection{Feller--Pareto family} \label{app:continuous:feller-pareto} \subsubsection{Feller--Pareto} \begin{itemize} \item Root: \code{fpareto} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u^\tau (1 - u)^\alpha}{% (x - \mu) \beta (\alpha, \tau )}, \quad u = \frac{v}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(\tau+j/\gamma) \Gamma(\alpha-j/\gamma)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad \text{integer } 0 \leq k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(\tau+j/\gamma, \alpha-j/\gamma; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad \text{integer } k \geq 0, \quad \alpha - j/\gamma \neq -1, -2, \dots \end{align*} \subsubsection{Pareto IV} \begin{itemize} \item Root: \code{pareto4} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \gamma u^\alpha (1 - u)}{(x - \mu)}, \quad u = \frac{1}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(1+j/\gamma) \Gamma(\alpha-j/\gamma)}{% \Gamma(\alpha)}, \quad \text{integer } 0 \leq k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(1+j/\gamma, \alpha-j/\gamma; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad \text{integer } k \geq 0 \quad \alpha - j/\gamma \neq -1, -2, \dots \end{align*} 
\subsubsection{Pareto III} \begin{itemize} \item Root: \code{pareto3} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u (1 - u)}{(x - \mu)}, \quad u = \frac{v}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= u \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \Gamma(1+j/\gamma) \Gamma(1-j/\gamma), \quad \text{integer } 0 \leq k < \gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, B(1+j/\gamma, 1-j/\gamma; u) \\ &\phantom{=} + x^k (1 - u), \quad \text{integer } k \geq 0 \quad 1 - j/\gamma \neq -1, -2, \dots \end{align*} \subsubsection{Pareto II} \begin{itemize} \item Root: \code{pareto2} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha u^\alpha (1 - u)}{(x - \mu)}, \quad u = \frac{1}{1 + v}, \quad v = \frac{x - \mu}{\theta}, \quad x > \mu \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(1+j) \Gamma(\alpha-j)}{% \Gamma(\alpha)}, \quad \text{integer } 0 \leq k < \alpha \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(1+j, \alpha-j; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad \text{integer } k \geq 0 \quad \alpha - j \neq -1, -2, \dots \end{align*} \subsubsection{Transformed beta} \begin{itemize} \item Root: \code{trbeta}, \code{pearson6} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u^\tau (1 - u)^\alpha}{% x \beta (\alpha, \tau )}, \qquad u = \frac{v}{1 + v}, \qquad v = \left(\frac{x}{\theta} \right)^\gamma 
\\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\gamma) \Gamma(\alpha-k/\gamma)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad -\tau\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k/\gamma, \alpha-k/\gamma; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad k > -\tau\gamma \end{align*} \subsubsection{Burr} \begin{itemize} \item Root: \code{burr} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \gamma u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k/\gamma) \Gamma(\alpha-k/\gamma)}{% \Gamma(\alpha)}, \quad -\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k/\gamma, \alpha-k/\gamma; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -\gamma \end{align*} \subsubsection{Loglogistic} \begin{itemize} \item Root: \code{llogis} \item Parameters: \code{shape} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= u \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1+k/\gamma) \Gamma(1-k/\gamma), \quad -\gamma < k < \gamma \\ \E{(X \wedge x)^k} &= \theta^k B(1+k/\gamma, 1-k/\gamma; u) \\ &\phantom{=} + x^k (1 - u), \quad k > -\gamma \end{align*} \subsubsection{Paralogistic} \begin{itemize} \item Root: \code{paralogis} \item Parameters: \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha^2 u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\alpha \\ F(x) &= 1 - u^\alpha \\ 
\displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k/\alpha) \Gamma(\alpha-k/\alpha)}{% \Gamma(\alpha)}, \quad -\alpha < k < \alpha^2 \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k/\alpha, \alpha-k/\alpha; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -\alpha \end{align*} \subsubsection{Generalized Pareto} \begin{itemize} \item Root: \code{genpareto} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u^\tau (1 - u)^\alpha}{x \beta (\alpha, \tau )}, \qquad u = \frac{v}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k) \Gamma(\alpha-k)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad -\tau < k < \alpha \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k, \alpha-k; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad k > -\tau \end{align*} \subsubsection{Pareto} \begin{itemize} \item Root: \code{pareto} \item Parameters: \code{shape} ($\alpha$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k) \Gamma(\alpha-k)}{% \Gamma(\alpha)}, \quad -1 < k < \alpha \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k, \alpha-k; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -1 \end{align*} \subsubsection{Single-parameter Pareto (Pareto I)} \begin{itemize} \item Root: \code{pareto1} \item Parameters: \code{shape} ($\alpha$), \code{min} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \theta^\alpha}{x^{\alpha+1}}, \quad x > \theta \\ F(x) &= 1 - \left( \frac{\theta}{x} \right)^\alpha, \quad x > \theta \\ \displaybreak[0] \E{X^k} &= \frac{\alpha \theta^k}{\alpha - k}, \quad k < \alpha \\ \E{(X \wedge x)^k} &= \frac{\alpha 
\theta^k}{\alpha - k} - \frac{k \theta^\alpha}{(\alpha - k) x^{\alpha-k}}, \quad x \geq \theta \end{align*} Although there appears to be two parameters, only $\alpha$ is a true parameter. The value of $\theta$ is the minimum of the distribution and is usually set in advance. \subsubsection{Inverse Burr} \begin{itemize} \item Root: \code{invburr} \item Parameters: \code{shape1} ($\tau$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau \gamma u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\gamma) \Gamma(1-k/\gamma)}{% \Gamma(\tau)}, \quad -\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k/\gamma, 1-k/\gamma; u)}{% \Gamma(\tau)} \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau\gamma \end{align*} \subsubsection{Inverse Pareto} \begin{itemize} \item Root: \code{invpareto} \item Parameters: \code{shape} ($\tau$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k) \Gamma(1-k)}{% \Gamma(\tau)}, \quad -\tau < k < 1 \\ \E{(X \wedge x)^k} &= \theta^k \tau \int_0^u y^{\tau+k-1} (1 - y)^{-k}\, dy \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau \end{align*} \subsubsection{Inverse paralogistic} \begin{itemize} \item Root: \code{invparalogis} \item Parameters: \code{shape} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau^2 u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left(\frac{x}{\theta} \right)^\tau \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\tau) \Gamma(1-k/\tau)}{% \Gamma(\tau)}, \quad -\tau^2 < k < \tau \\ \E{(X \wedge x)^k} &= \frac{% 
\theta^k B(\tau+k/\tau, 1-k/\tau; u)}{% \Gamma(\tau)} \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau^2 \end{align*} \subsection{Transformed gamma family} \label{app:continuous:transformed-gamma} \subsubsection{Transformed gamma} \begin{itemize} \item Root: \code{trgamma} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\alpha e^{-u}}{x \Gamma(\alpha)}, \qquad u = \left( \frac{x}{\theta} \right)^\tau \\ F(x) &= \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha+k/\tau)}{\Gamma(\alpha)} \quad k > -\alpha\tau \\ \E{(X \wedge x)^k} &= \frac{\theta^k \Gamma(\alpha+k/\tau)}{\Gamma(\alpha)} \Gamma(\alpha+k/\tau; u) \\ &\phantom{=} + x^k [1 - \Gamma(\alpha; u)], \quad k > -\alpha\tau \end{align*} \subsubsection{Inverse transformed gamma} \begin{itemize} \item Root: \code{invtrgamma} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\alpha e^{-u}}{x\Gamma (\alpha)}, \qquad u = \left( \frac{\theta}{x} \right)^\tau \\ F(x) &= 1 - \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha-k/\tau)}{\Gamma(\alpha)} \quad k < \alpha\tau \\ \E{(X \wedge x)^k} &= \frac{\theta^k G(\alpha-k/\tau; u)}{\Gamma(\alpha)} + x^k \Gamma(\alpha; u), \quad \text{all }k \end{align*} \subsubsection{Inverse gamma} \begin{itemize} \item Root: \code{invgamma} \item Parameters: \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u^\alpha e^{-u}}{x\Gamma (\alpha)}, \qquad u = \frac{\theta}{x}\\ F(x) &= 1 - \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha-k)}{\Gamma(\alpha)} \quad k < \alpha \\ \E{(X \wedge x)^k} &= \frac{\theta^k G(\alpha-k; u)}{\Gamma(\alpha)} + x^k \Gamma(\alpha; u), \quad 
\text{all }k \\ M(t) &= \frac{2}{\Gamma(\alpha)} (-\theta t)^{\alpha/2} K_\alpha(\sqrt{-4\theta t}) \end{align*} \subsubsection{Inverse Weibull} \begin{itemize} \item Root: \code{invweibull}, \code{lgompertz} \item Parameters: \code{shape} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u e^{-u}}{x}, \qquad u = \left( \frac{\theta}{x} \right)^\tau \\ F(x) &= e^{-u} \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1-k/\tau) \quad k < \tau \\ \E{(X \wedge x)^k} &= \theta^k G(1-k/\tau; u) + x^k (1 - e^{-u}), \quad \text{all }k \end{align*} \subsubsection{Inverse exponential} \begin{itemize} \item Root: \code{invexp} \item Parameters: \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u e^{-u}}{x}, \qquad u = \frac{\theta}{x} \\ F(x) &= e^{-u} \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1-k) \quad k < 1 \\ \E{(X \wedge x)^k} &= \theta^k G(1-k; u) + x^k (1 - e^{-u}), \quad \text{all }k \end{align*} \subsection{Other distributions} \label{app:continuous:other} \subsubsection{Loggamma} \begin{itemize} \item Root: \code{lgamma} \item Parameters: \code{shapelog} ($\alpha$), \code{ratelog} ($\lambda$) \end{itemize} \begin{align*} f(x) &= \frac{\lambda^\alpha (\ln x)^{\alpha - 1}}{% x^{\lambda + 1} \Gamma(\alpha)}, \quad x > 1 \\ F(x) &= \Gamma( \alpha ; \lambda \ln x), \quad x > 1 \\ \displaybreak[0] \E{X^k} &= \left( \frac{\lambda}{\lambda - k} \right)^\alpha, \quad k < \lambda \\ \E{(X \wedge x)^k} &= \left( \frac{\lambda}{\lambda - k} \right)^\alpha \Gamma(\alpha; (\lambda - k) \ln x) \\ &\phantom{=} + x^k (1 - \Gamma(\alpha; \lambda \ln x)), \quad k < \lambda \end{align*} \subsubsection{Gumbel} \begin{itemize} \item Root: \code{gumbel} \item Parameters: \code{alpha} ($-\infty < \alpha < \infty$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{e^{-(u + e^{-u})}}{\theta}, \qquad u = \frac{x - \alpha}{\theta}, \qquad -\infty < x < 
\infty \\ F(x) &= \exp[-\exp(-u)] \\ \displaybreak[0] \E{X} &= \alpha + \gamma \theta, \quad \gamma \approx 0.57721566490153 \\ \VAR{X} &= \frac{\pi^2 \theta^2}{6} \\ M(t) &= e^{\alpha t} \Gamma(1 - \theta t) \end{align*} \subsubsection{Inverse Gaussian} \begin{itemize} \item Root: \code{invgauss} \item Parameters: \code{mean} ($\mu$), \code{shape} ($\lambda = 1/\phi$), \code{dispersion} ($\phi$) \end{itemize} \begin{align*} f(x) &= \left( \frac{1}{2 \pi \phi x^3} \right)^{1/2} \exp\left\{ -\frac{(x/\mu - 1)^2}{2 \phi x} \right\} \\ F(x) &= \Phi\left( \frac{x/\mu - 1}{\sqrt{\phi x}} \right) + e^{2/(\phi\mu)} \Phi\left( -\frac{x/\mu + 1}{\sqrt{\phi x}} \right) \\ \displaybreak[0] \E{X^k} &= \mu^k \sum_{i = 0}^{k - 1} \frac{(k + i - 1)!}{i! (k - i - 1)!} \left( \frac{\phi \mu}{2} \right)^{i}, \quad k = 1, 2, \dots \\ \E{X \wedge x} &= \mu \left[ \Phi\left( \frac{x/\mu - 1}{\sqrt{\phi x}} \right) - e^{2/(\phi\mu)} \Phi\left(- \frac{x/\mu + 1}{\sqrt{\phi x}} \right) \right] \\ &\phantom{=} + x (1 - F(x)) \\ M(t) &= \exp \left\{ \frac{1}{\phi \mu} \left(1 - \sqrt{1 - 2 \phi \mu^2 t}\right) \right\}, \quad t \leq \frac{1}{2 \phi \mu^2} \end{align*} \noindent% The limiting case $\mu = \infty$ is an inverse gamma distribution with $\alpha = 1/2$ and $\lambda = 2\phi$ (or inverse chi-squared). 
\subsubsection{Generalized beta} \begin{itemize} \item Root: \code{genbeta} \item Parameters: \code{shape1} ($a$), \code{shape2} ($b$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^a (1 - u)^{b - 1}}{x \beta (a, b)}, \qquad u = \left( \frac{x}{\theta} \right)^\tau, \qquad 0 < x < \theta \\ F(x) &= \beta (a, b ; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \beta(a+k/\tau, b)}{\beta(a, b)}, \quad k > -a\tau \\ \E{(X \wedge x)^k} &= \frac{% \theta^k \beta(a+k/\tau, b)}{\beta(a, b)} \beta(a+k/\tau, b; u) \\ &\phantom{=} + x^k [1 - \beta(a, b; u)], \quad k > -\tau\gamma \end{align*} \section{Phase-type distributions} \label{app:phase-type} Consider a continuous-time Markov process with $m$ transient states and one absorbing state. Let \begin{equation*} \mat{Q} = \begin{bmatrix} \mat{T} & \mat{t} \\ \mat{0} & 0 \end{bmatrix} \end{equation*} be the transition rates matrix (or intensity matrix) of such a process and let $(\mat{\pi}, \pi_{m + 1})$ be the initial probability vector. Here, $\mat{T}$ is an $m \times m$ non-singular matrix with $t_{ii} < 0$ for $i = 1, \dots, m$ and $t_{ij} \geq 0$ for $i \neq j$; $\mat{\pi}$ is an $1 \times m$ vector of probabilities such that $\mat{\pi} \mat{e} + \pi_{m + 1} = 1$; $\mat{t} = -\mat{T} \mat{e}$; $\mat{e} = [1]_{m \times 1}$ is a column vector of ones. % \bigskip \begin{itemize} \item Root: \code{phtype} \item Parameters: \code{prob} ($\mat{\pi}_{1 \times m}$), \code{rates} ($\mat{T}_{m \times m}$) \end{itemize} \begin{align*} f(x) &= \begin{cases} 1 - \mat{\pi} \mat{e} & x = 0, \\ \mat{\pi} e^{\mat{T} x} \mat{t}, & x > 0 \end{cases} \\ F(x) &= \begin{cases} 1 - \mat{\pi} \mat{e}, & x = 0, \\ 1 - \mat{\pi} e^{\mat{T} x} \mat{e}, & x > 0 \end{cases} \\ \E{X^k} &= k! 
\mat{\pi} (-\mat{T})^{-k} \mat{e} \\ M(t) &= \mat{\pi} (-t \mat{I} - \mat{T})^{-1} \mat{t} + (1 - \mat{\pi} \mat{e}) \end{align*} \section{Discrete distributions} \label{app:discrete} This appendix gives the root name and the parameters of the R support functions for the members of the $(a, b, 0)$ and $(a, b, 1)$ discrete distributions as defined in \citet{LossModels4e}; the values of $a$, $b$ and $p_0$ in the representation; the pmf; the relationship to other distributions, when there is one. Since at some point we developed the formulas for the first three cumulants\footnote{% Hence the mean, variance and skewness.} % of the distributions of the $(a, b, 1)$ class, they are also recorded here for posterity. The appendix also provides the main characteristics of the Poisson-inverse Gaussian distribution. \subsection{Standard distributions} \label{app:discrete:a-b-0} This section contains distributions of the $(a, b, 0)$ class. They are all supported in base R. Their pmf can be computed recursively by fixing $p_0$ to the specified value and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 1, 2, \dots$. All parameters are finite. \subsubsection{Poisson} \begin{itemize} \item Root: \code{pois} \item Parameter: \code{lambda} ($\lambda \geq 0$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_0 = e^{-\lambda} \\ p_k &= \frac{e^{-\lambda} \lambda^k}{k!} \end{align*} \subsubsection{Negative binomial} \begin{itemize} \item Root: \code{nbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$), \code{mu} ($r(1 - p)/p$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_0 = p^r \\ p_k &= \binom{r+k-1}{k} p^r (1 - p)^k \end{align*} \begin{itemize} \item Special case: Geometric$(p)$ when $r = 1$. 
\end{itemize} \subsubsection{Geometric} \begin{itemize} \item Root: \code{geom} \item Parameter: \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_0 = p \\ p_k &= p (1 - p)^k \end{align*} \subsubsection{Binomial} \begin{itemize} \item Root: \code{binom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_0 = (1 - p)^n \\ p_k &= \binom{n}{k} p^k (1 - p)^{n - k}, \quad k = 1, 2, \dots, n \end{align*} \begin{itemize} \item Special case: Bernoulli$(p)$ when $n = 1$. \end{itemize} \subsection{Zero-truncated distributions} \label{app:discrete:zt} Package \pkg{actuar} provides support for the distributions of the $(a, b, 1)$ class in this section. Zero-truncated distributions have probability at zero $p_0^T = 0$. Their pmf can be computed recursively by fixing $p_1$ to the value specified below and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 2, 3, \dots$. The distributions are all defined on $k = 1, 2, \dots$. The cumulants are developed using \eqref{eq:cumulants}. A limiting case of all zero-truncated distributions is a single point mass in $k = 1$. \subsubsection{Zero-truncated Poisson} \begin{itemize} \item Root: \code{ztpois} \item Parameter: \code{lambda} ($\lambda \geq 0$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_1 = \frac{\lambda}{e^\lambda - 1} \\ p_k &= \frac{\lambda^k}{k! 
(e^\lambda - 1)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= c \lambda, \qquad c = \frac{1}{1 - e^{-\lambda}} \\ \kappa_2 &= c \lambda + c(1 - c) \lambda^2 \\ \kappa_3 &= c(1 - c)(1 - 2c) \lambda^3 + 3c(1 - c) \lambda^2 + c \lambda \end{align*} \subsubsection{Zero-truncated negative binomial} \begin{itemize} \item Root: \code{ztnbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_1 = \frac{r p^r (1 - p)}{1 - p^r} \\ p_k &= \binom{r+k-1}{k} \frac{p^r (1 - p)^k}{1 - p^r} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= crd, \qquad c = \frac{1}{1 - p^r}, \qquad d = \frac{1 - p}{p} \\ \kappa_2 &= crd(d + 1) + c(1 - c) r^2 d^2 \\ \kappa_3 &= crd(3rd(d + 1) + r^2 d^2 + (d + 1)(2d + 1)) \\ &\phantom{=} - 3(crd)^2(rd + d + 1) + 2(crd)^3 \end{align*} \begin{itemize} \item Special cases: Logarithmic$(1 - p)$ when $r = 0$; Zero-truncated geometric$(p)$ when $r = 1$. \end{itemize} \subsubsection{Zero-truncated geometric} \begin{itemize} \item Root: \code{ztgeom} \item Parameter: \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_1 = p \\ p_k &= p (1 - p)^{k - 1} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{1}{p} \\ \kappa_2 &= \frac{1 - p}{p^2} \\ \kappa_3 &= \frac{(1 - p)(2 - p)}{p^3} \end{align*} \subsubsection{Zero-truncated binomial} \begin{itemize} \item Root: \code{ztbinom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_1 = \frac{n p (1 - p)^{n - 1}}{1 - (1 - p)^n} \\ p_k &= \binom{n}{k} \frac{p^k (1 - p)^{n - k}}{1 - (1 - p)^n}, \quad k = 1, 2, \dots, n \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= cnp, \qquad c = \frac{1}{1 - (1 - p)^n} \\ \kappa_2 &= cnp(1 - p) + c(1 - c)n^2p^2 \\ \kappa_3 &= cnp(1 + 3(n - 1)p + (n - 1)(n - 2)p^2) \\ &\phantom{=} - 3(cnp)^2(1 + (n - 
1)p) + 2(cnp)^3 \end{align*} \subsubsection{Logarithmic} \begin{itemize} \item Root: \code{logarithmic} \item Parameter: \code{prob} ($0 \leq p < 1$) \end{itemize} \begin{align*} a &= p, \qquad b = -p, \qquad p_1 = - \frac{p}{\log (1 - p)} \\ p_k &= - \frac{p^k}{k \log (1 - p)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{-p}{\ln(1 - p)(1 - p)} \\ \kappa_2 &= \frac{-p(p + 2\ln(1 - p)) + p\ln(1 - p)}% {\ln(1 - p)^2(1 - p)^2} \\ \kappa_3 &= \frac{p(p + 1)\ln(1 - p)^2(1 - p)^3(p - 1)^{-3} - 3p^2 \ln(1 - p) - 2p^3}% {-(-p(p + 2 \ln(1 - p)) + p \ln(1 - p))^{3/2}} \end{align*} \subsection{Zero-modified distributions} \label{app:discrete:zm} Package \pkg{actuar} provides support for the distributions of the $(a, b, 1)$ class in this section. Zero-modified distributions have an arbitrary probability at zero $p_0^M \neq p_0$, where $p_0$ is the probability at zero for the corresponding member of the $(a, b, 0)$ class. Their pmf can be computed recursively by fixing $p_1$ to the value specified below and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 2, 3, \dots$. The distributions are all defined on $k = 0, 1, 2, \dots$. The cumulants are developed using \eqref{eq:cumulants} with $c$ as defined in \eqref{eq:c-in-cumulants-zm}. A limiting case of all zero-modified distributions is a discrete mixture between a point mass in $k = 0$ (with probability $p_0^M$) and a point mass in $k = 1$ (with probability $1 - p_0^M$). \subsubsection{Zero-modified Poisson} \begin{itemize} \item Root: \code{zmpois} \item Parameters: \code{lambda} ($\lambda > 0$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_1 = \frac{(1 - p_0^M) \lambda}{e^\lambda - 1} \\ p_k &= \frac{(1 - p_0^M) \lambda^k}{k! 
(e^\lambda - 1)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= c \lambda, \qquad c = \frac{1 - p_0^M}{1 - e^{-\lambda}} \\ \kappa_2 &= c \lambda + c(1 - c) \lambda^2 \\ \kappa_3 &= c(1 - c)(1 - 2c) \lambda^3 + 3c(1 - c) \lambda^2 + c \lambda \end{align*} \subsubsection{Zero-modified negative binomial} \begin{itemize} \item Root: \code{zmnbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_1 = \frac{(1 - p_0^M) r p^r (1 - p)}{1 - p^r} \\ p_k &= \binom{r+k-1}{k} \frac{(1 - p_0^M) p^r (1 - p)^k}{1 - p^r} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= crd, \qquad c = \frac{1 - p_0^M}{1 - p^r}, \qquad d = \frac{1 - p}{p} \\ \kappa_2 &= crd(d + 1) + c(1 - c) r^2 d^2 \\ \kappa_3 &= crd(3rd(d + 1) + r^2 d^2 + (d + 1)(2d + 1)) \\ &\phantom{=} - 3(crd)^2(rd + d + 1) + 2(crd)^3 \end{align*} \begin{itemize} \item Special cases: Zero-modified logarithmic$(1 - p)$ when $r = 0$; Zero-modified geometric$(p)$ when $r = 1$. 
\end{itemize} \subsubsection{Zero-modified geometric} \begin{itemize} \item Root: \code{zmgeom} \item Parameters: \code{prob} ($0 < p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_1 = (1 - p_0^M) p \\ p_k &= (1 - p_0^M) p (1 - p)^{k - 1} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{1 - p_0^M}{p} \\ \kappa_2 &= \frac{(1 - p_0^M)(1 - p + p_0^M)}{p^2} \\ \kappa_3 &= \frac{(1 - p_0^M)(6 - 6p + p^2) - 3(1 - p_0^M)^2(2 - p) + 2(1 - p_0^M)^3}{p^3} \end{align*} \subsubsection{Zero-modified binomial} \begin{itemize} \item Root: \code{zmbinom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_1^M = \frac{n (1 - p_0^M) p (1 - p)^{n - 1}}{1 - (1 - p)^n} \\ p_k &= \binom{n}{k} \frac{(1 - p_0^M) p^k (1 - p)^{n - k}}{1 - (1 - p)^n}, \quad k = 1, 2, \dots, n \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= cnp, \qquad c = \frac{1 - p_0^M}{1 - (1 - p)^n} \\ \kappa_2 &= cnp(1 - p) + c(1 - c)n^2p^2 \\ \kappa_3 &= cnp(1 + 3(n - 1)p + (n - 1)(n - 2)p^2) \\ &\phantom{=} - 3(cnp)^2(1 + (n - 1)p) + 2(cnp)^3 \end{align*} \subsubsection{Zero-modified logarithmic} \begin{itemize} \item Root: \code{zmlogarithmic} \item Parameters: \code{prob} ($0 \leq p < 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= p, \qquad b = -p, \qquad p_1 = - \frac{(1 - p_0^M) p}{\log (1 - p)} \\ p_k &= - \frac{(1 - p_0^M) p^k}{k \log (1 - p)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{-cp}{\ln(1 - p)(1 - p)}, \qquad c = 1 - p_0^M \\ \kappa_2 &= \frac{-cp(p + 2 \ln(1 - p)) + c^2 p \ln(1 - p)}% {\ln(1 - p)^2 (1 - p)^2} \\ \kappa_3 &= \frac{cp(p + 1) \ln(1 - p)^2(1 - p)^3(p - 1)^{-3} - 3c^2 p^2 \ln(1 - p) - 2c^3 p^3}% {-(-cp(p + 2 \ln(1 - p)) + c^2 p \ln(1 - p))^{3/2}} \end{align*} \subsection{Poisson-inverse Gaussian} \label{app:discrete:pig} 
\begin{itemize} \item Root: \code{poisinvgauss}, \code{pig} \item Parameters: \code{mean} ($\mu > 0$), \code{shape} ($\lambda = 1/\phi$), \code{dispersion} ($\phi > 0$) \end{itemize} \begin{align*} p_x &= \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi \left( 1 + \frac{1}{2\phi\mu^2} \right)} \right)^{- \left( x - \frac{1}{2} \right)} \\ &\phantom{=} \times K_{x - 1/2} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right), \quad x = 0, 1, \dots, \end{align*} \noindent% Recursively: \begin{align*} p_0 &= \exp\left\{ \frac{1}{\phi\mu} \left(1 - \sqrt{1 + 2\phi\mu^2}\right) \right\} \\ p_1 &= \frac{\mu}{\sqrt{1 + 2\phi\mu^2}}\, p_0 \\ p_x &= \frac{2\phi\mu^2}{1 + 2\phi\mu^2} \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{\mu^2}{1 + 2\phi\mu^2} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{align*} \noindent% In the limiting case $\mu = \infty$, the pmf reduces to \begin{equation*} p_x = \sqrt{\frac{2}{\pi \phi}} \frac{1}{x!} (\sqrt{2\phi})^{- \left( x - \frac{1}{2} \right)} K_{x - \frac{1}{2}} (\sqrt{2/\phi}), \quad x = 0, 1, \dots \end{equation*} and the recurrence relations become \begin{align*} p_0 &= \exp\left\{-\sqrt{2/\phi}\right\} \\ p_1 &= \frac{1}{\sqrt{2\phi}}\, p_0 \\ p_x &= \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{1}{2\phi} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. 
\end{align*} %% References \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% coding: utf-8 %%% TeX-master: t %%% End: actuar/vignettes/simulation.Rnw0000644000176200001440000004772215151411046016427 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Simulation of insurance data} %\VignettePackage{actuar} %\SweaveUTF8 \title{Simulation of insurance data with \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal \\[3ex] Louis-Philippe Pouliot \\ Université Laval} \date{} <>= library(actuar) options(width = 52, digits = 4) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} Package \pkg{actuar} provides functions to facilitate the generation of random variates from various probability models commonly used in actuarial applications. From the simplest to the most sophisticated, these functions are: \begin{enumerate} \item \code{rmixture} to simulate from discrete mixtures; \item \code{rcompound} to simulate from compound models (and a simplified version, \code{rcomppois}, to simulate from the very common compound Poisson model); \item \code{rcomphierarc} to simulate from compound models where both the frequency and the severity components can have a hierarchical structure. \end{enumerate} \section{Simulation from discrete mixtures} \label{sec:rmixture} A random variable is said to be a discrete mixture of the random variables with probability density functions $f_1, \dots, f_n$ if its density can be written as \begin{equation} \label{eq:mixture} f(x) = p_1 f_1(x) + \dots + p_n f_n(x) = \sum_{i = 1}^n p_i f_i(x), \end{equation} where $p_1, \dots, p_n$ are probabilities (or weights) such that $p_i \geq 0$ and $\sum_{i = 1}^n p_i = 1$. Function \code{rmixture} makes it easy to generate random variates from such mixtures. 
The arguments of the function are: \begin{enumerate} \item \code{n} the number of variates to generate; \item \code{probs} a vector of values that will be normalized internally to create the probabilities $p_1, \dots, p_n$; \item \code{models} a vector of expressions specifying the simulation models corresponding to the densities $f_1, \dots, f_n$. \end{enumerate} The specification of simulation models follows the syntax of \code{rcomphierarc} (explained in greater detail in \autoref{sec:rcomphierarc}). In a nutshell, the models are expressed in a semi-symbolic fashion using an object of mode \code{"expression"} where each element is a complete call to a random number generation function, with the number of variates omitted. The following example should clarify this concept. \begin{example} Let $X$ be a mixture between two exponentials: one with mean $1/3$ and one with mean $1/7$. The first exponential has twice as much weight as the second one in the mixture. Therefore, the density of $X$ is \begin{equation*} f(x) = \frac{2}{3} (3 e^{-3x}) + \frac{1}{3} (7 e^{-7x}) \\ = 2 e^{-3x} + \frac{7}{3} e^{-7x}. \end{equation*} The following expression generates $10$ random variates from this density using \code{rmixture}. <>= rmixture(10, probs = c(2, 1), models = expression(rexp(3), rexp(7))) @ \qed \end{example} See also \autoref{ex:comppois} for a more involved application combining simulation from a mixture and simulation from a compound Poisson model. \section{Simulation from compound models} \label{sec:rcompound} Actuaries often need to simulate separately the frequency and the severity of claims for compound models of the form \begin{equation} \label{eq:definition-S} S = C_1 + \dots + C_N, \end{equation} where $C_1, C_2, \dots$ are the mutually independent and identically distributed random variables of the claim amounts, each independent of the frequency random variable $N$. 
Function \code{rcompound} generates variates from the random variable $S$ when the distribution of both random variables $N$ and $C$ is non hierarchical; for the more general hierarchical case, see \autoref{sec:rcomphierarc}. The function has three arguments: \begin{enumerate} \item \code{n} the number of variates to generate; \item \code{model.freq} the frequency model (random variable $N$); \item \code{model.sev} the severity model (random variable $C$). \end{enumerate} Arguments \code{model.freq} and \code{model.sev} are simple R expressions consisting of calls to a random number generation function with the number of variates omitted. This is of course similar to argument \code{models} of \code{rmixture}, only with a slightly simpler syntax since one does not need to wrap the calls in \code{expression}. Function \code{rcomppois} is a simplified interface for the common case where $N$ has a Poisson distribution and, therefore, $S$ is compound Poisson. In this function, argument \code{model.freq} is replaced by \code{lambda} that takes the value of the Poisson parameter. \begin{example} Let $S \sim \text{Compound Poisson}(1.5, F)$, where $1.5$ is the value of the Poisson parameter and $F$ is the cumulative distribution function of a gamma distribution with shape parameter $\alpha = 3$ and rate parameter $\lambda = 2$. 
We obtain variates from the random variable $S$ using \code{rcompound} or \code{rcomppois} as follows: <>= rcompound(10, rpois(1.5), rgamma(3, 2)) rcomppois(10, 1.5, rgamma(3, 2)) @ Specifying argument \code{SIMPLIFY = FALSE} to either function will return not only the variates from $S$, but also the underlying variates from the random variables $N$ and $C_1, \dots, C_N$: <>= rcomppois(10, 1.5, rgamma(3, 2), SIMPLIFY = FALSE) @ \qed \end{example} \begin{example} \label{ex:comppois} Theorem~9.7 of \cite{LossModels4e} states that the sum of compound Poisson random variables is itself compound Poisson with Poisson parameter equal to the sum of the Poisson parameters and severity distribution equal to the mixture of the severity models. Let $S = S_1 + S_2 + S_3$, where: % $S_1$ is compound Poisson with mean frequency $\lambda = 2$ and severity gamma with parameters $(3, 1)$; % $S_2$ is compound Poisson with $\lambda = 1$ and severity Gamma with parameters $(5, 4)$; % $S_3$ is compound Poisson with $\lambda = 1/2$ and severity Lognormal with parameters $(2, 1)$. % By the aforementioned theorem, $S$ is compound Poisson with $\lambda = 2 + 1 + 1/2 = 7/2$ and severity density \begin{equation*} f(x) = \frac{4}{7} \left( \frac{1}{\Gamma(3)} x^2 e^{-x} \right) + \frac{2}{7} \left( \frac{4^5}{\Gamma(5)} x^4 e^{-4x} \right) + \frac{1}{7} \phi(\ln x - 2). \end{equation*} Combining \code{rcomppois} and \code{rmixture} we can generate variates of $S$ using the following elegant expression. <>= x <- rcomppois(1e5, 3.5, rmixture(probs = c(2, 1, 0.5), expression(rgamma(3), rgamma(5, 4), rlnorm(2, 1)))) @ One can verify that the theoretical mean of $S$ is $6 + 5/4 + (e^{5/2})/2 = 13.34$. Now, the empirical mean based on the above sample of size $10^5$ is: <>= mean(x) @ \qed \end{example} \section{Simulation from compound hierarchical models} \label{sec:rcomphierarc} Hierarchical probability models are widely used for data classified in a tree-like structure and in Bayesian inference. 
The main characteristic of such models is to have the probability law at some level in the classification structure be conditional on the outcome in previous levels. For example, adopting a bottom to top description of the model, a simple hierarchical model could be written as \begin{equation} \label{eq:basic_model} \begin{split} X_t|\Lambda, \Theta &\sim \text{Poisson}(\Lambda) \\ \Lambda|\Theta &\sim \text{Gamma}(3, \Theta) \\ \Theta &\sim \text{Gamma}(2, 2), \end{split} \end{equation} where $X_t$ represents actual data. The random variables $\Theta$ and $\Lambda$ are generally seen as uncertainty, or risk, parameters in the actuarial literature; in the sequel, we refer to them as mixing parameters. The example above is merely a multi-level mixture of models, something that is simple to simulate ``by hand''. The following R expression will yield $n$ variates of the random variable $X_t$: <>= rpois(n, rgamma(n, 3, rgamma(n, 2, 2))) @ However, for categorical data common in actuarial applications there will usually be many categories --- or \emph{nodes} --- at each level. Simulation is then complicated by the need to always use the correct parameters for each variate. Furthermore, one may need to simulate both the frequency and the severity of claims for compound models of the form \eqref{eq:definition-S}. This section briefly describes function \code{rcomphierarc} and its usage. \cite{Goulet:simpf:2008} discuss in more details the models supported by the function and give more thorough examples. \subsection{Description of hierarchical models} \label{sec:rcomphierarc:description} We consider simulation of data from hierarchical models. 
We want a method to describe these models in R that meets the following criteria: \begin{enumerate} \item simple and intuitive to go from the mathematical formulation of the model to the R formulation and back; \item allows for any number of levels and nodes; \item at any level, allows for any use of parameters higher in the hierarchical structure. \end{enumerate} A hierarchical model is completely specified by the number of nodes at each level and by the probability laws at each level. The number of nodes is passed to \code{rcomphierarc} by means of a named list where each element is a vector of the number of nodes at a given level. Vectors are recycled when the number of nodes is the same throughout a level. Probability models are expressed in a semi-symbolic fashion using an object of mode \code{"expression"}. Each element of the object must be named --- with names matching those of the number of nodes list --- and should be a complete call to an existing random number generation function, but with the number of variates omitted. Hierarchical models are achieved by replacing one or more parameters of a distribution at a given level by any combination of the names of the levels above. If no mixing is to take place at a level, the model for this level can be \code{NULL}. \begin{example} Consider the following expanded version of model \eqref{eq:basic_model}: \begin{align*} X_{ijt}|\Lambda_{ij}, \Theta_i &\sim \text{Poisson}(\Lambda_{ij}), & t &= 1, \dots, n_{ij} \\ \Lambda_{ij}|\Theta_i &\sim \text{Gamma}(3, \Theta_i), & j &= 1, \dots, J_i \\ \Theta_i &\sim \text{Gamma}(2, 2), & i &= 1, \dots, I, \end{align*} with $I = 3$, $J_1 = 4$, $J_2 = 5$, $J_3 = 6$ and $n_{ij} \equiv n = 10$. Then the number of nodes and the probability model are respectively specified by the following expressions. 
\begin{Schunk} \begin{Verbatim} list(Theta = 3, Lambda = c(4, 5, 6), Data = 10) \end{Verbatim} \end{Schunk} \begin{Schunk} \begin{Verbatim} expression(Theta = rgamma(2, 2), Lambda = rgamma(3, Theta), Data = rpois(Lambda)) \end{Verbatim} \end{Schunk} \qed \end{example} Storing the probability model requires an expression object in order to avoid evaluation of the incomplete calls to the random number generation functions. Function \code{rcomphierarc} builds and executes the calls to the random generation functions from the top of the hierarchical model to the bottom. At each level, the function \begin{enumerate} \item infers the number of variates to generate from the number of nodes list, and \item appropriately recycles the mixing parameters simulated previously. \end{enumerate} The actual names in the list and the expression object can be anything; they merely serve to identify the mixing parameters. Furthermore, any random generation function can be used. The only constraint is that the name of the number of variates argument is \code{n}. In addition, \code{rcomphierarc} supports usage of weights in models. These usually modify the frequency parameters to take into account the ``size'' of an entity. Weights are used in simulation wherever the name \code{weights} appears in a model. \subsection[Usage of rcomphierarc]{Usage of \code{rcomphierarc}} \label{sec:rcomphierarc:usage} Function \code{rcomphierarc} can simulate data for structures where both the frequency model and the severity model are hierarchical. It has four main arguments: \begin{enumerate} \item \code{nodes} for the number of nodes list; \item \code{model.freq} for the frequency model; \item \code{model.sev} for the severity model; \item \code{weights} for the vector of weights in lexicographic order, that is all weights of entity 1, then all weights of entity 2, and so on. 
\end{enumerate} The function returns the variates in a list of class \code{"portfolio"} with a \code{dim} attribute of length two. The list contains all the individual claim amounts for each entity. Since every element can be a vector, the object can be seen as a three-dimension array with a third dimension of potentially varying length. The function also returns a matrix of integers giving the classification indexes of each entity in the portfolio. The package also defines methods for four generic functions to easily access key quantities for each entity of the simulated portfolio: \begin{enumerate} \item a method of \code{aggregate} to compute the aggregate claim amounts $S$; \item a method of \code{frequency} to compute the number of claims $N$; \item a method of \code{severity} (a generic function introduced by the package) to return the individual claim amounts $C_j$; \item a method of \code{weights} to extract the weights matrix. \end{enumerate} In addition, all methods have a \code{classification} and a \code{prefix} argument. When the first is \code{FALSE}, the classification index columns are omitted from the result. The second argument overrides the default column name prefix; see the \code{rcomphierarc.summaries} help page for details. The following example illustrates these concepts in detail. \begin{example} Consider the following compound hierarchical model: \begin{equation*} S_{ijt} = C_{ijt1} + \dots + C_{ijt N_{ijt}}, \end{equation*} for $i = 1, \dots, I$, $j = 1, \dots, J_i$, $t = 1, \dots, n_{ij}$ and with \begin{align*} N_{ijt}|\Lambda_{ij}, \Phi_i &\sim \text{Poisson}(w_{ijt} \Lambda_{ij}) & C_{ijtu}|\Theta_{ij}, \Psi_i &\sim \text{Lognormal}(\Theta_{ij}, 1) \notag \\ \Lambda_{ij}|\Phi_i &\sim \text{Gamma}(\Phi_i, 1) & \Theta_{ij}|\Psi_i &\sim N(\Psi_i, 1) \\ \Phi_i &\sim \text{Exponential}(2) & \Psi_i &\sim N(2, 0.1). \notag \end{align*} (Note how weights modify the Poisson parameter.) 
Using as convention to number the data level 0, the above is a two-level compound hierarchical model. Assuming that $I = 2$, $J_1 = 4$, $J_2 = 3$, $n_{11} = \dots = n_{14} = 4$ and $n_{21} = n_{22} = n_{23} = 5$ and that weights are simply simulated from a uniform distribution on $(0.5, 2.5)$, then simulation of a data set with \code{rcomphierarc} is achieved with the following expressions. <>= set.seed(3) @ <>= nodes <- list(cohort = 2, contract = c(4, 3), year = c(4, 4, 4, 4, 5, 5, 5)) mf <- expression(cohort = rexp(2), contract = rgamma(cohort, 1), year = rpois(weights * contract)) ms <- expression(cohort = rnorm(2, sqrt(0.1)), contract = rnorm(cohort, 1), year = rlnorm(contract, 1)) wijt <- runif(31, 0.5, 2.5) pf <- rcomphierarc(nodes = nodes, model.freq = mf, model.sev = ms, weights = wijt) @ Object \code{pf} is a list of class \code{"portfolio"} containing, among other things, the aforementioned two-dimension list as element \code{data} and the classification matrix (subscripts $i$ and $j$) as element \code{classification}: <>= class(pf) pf$data pf$classification @ The output of \code{pf\$data} is not much readable. If we were to print the results of \code{rcomphierarc} this way, many users would wonder what \code{Numeric,\emph{n}} means. (It is actually R's way to specify that a given element in the list is a numeric vector of length $n$ --- the third dimension mentioned above.) To ease reading, the \code{print} method for objects of class \code{"portfolio"} only prints the simulation model and the number of claims in each node: <>= pf @ By default, the method of \code{aggregate} returns the values of $S_{ijt}$ in a regular matrix (subscripts $i$ and $j$ in the rows, subscript $t$ in the columns). 
The method has a \code{by} argument to get statistics for other groupings and a \code{FUN} argument to get statistics other than the sum: <>= aggregate(pf) aggregate(pf, by = c("cohort", "year"), FUN = mean) @ The method of \code{frequency} returns the values of $N_{ijt}$. It is mostly a wrapper for the \code{aggregate} method with the default \code{sum} statistic replaced by \code{length}. Hence, arguments \code{by} and \code{FUN} remain available: <>= frequency(pf) frequency(pf, by = "cohort") @ The method of \code{severity} returns the individual variates $C_{ijtu}$ in a matrix similar to those above, but with a number of columns equal to the maximum number of observations per entity, \begin{displaymath} \max_{i, j} \sum_{t = 1}^{n_{ij}} N_{ijt}. \end{displaymath} Thus, the original period of observation (subscript $t$) and the identifier of the severity within the period (subscript $u$) are lost and each variate now constitutes a ``period'' of observation. For this reason, the method provides an argument \code{splitcol} in case one would like to extract separately the individual severities of one or more periods: <>= severity(pf) severity(pf, splitcol = 1) @ Finally, the weights matrix corresponding to the data in object \code{pf} is <>= weights(pf) @ Combined with the argument \code{classification = FALSE}, the above methods can be used to easily compute loss ratios: <>= aggregate(pf, classif = FALSE)/ weights(pf, classif = FALSE) @ \qed \end{example} \begin{example} \cite{Scollnik:2001:MCMC} considers the following model for the simulation of claims frequency data in a Markov Chain Monte Carlo (MCMC) context: \begin{align*} S_{it}|\Lambda_i, \alpha, \beta &\sim \text{Poisson}(w_{it} \Lambda_i) \\ \Lambda_i|\alpha, \beta &\sim \text{Gamma}(\alpha, \beta) \\ \alpha &\sim \text{Gamma}(5, 5) \\ \beta &\sim \text{Gamma}(25, 1) \end{align*} for $i = 1, 2, 3$, $t = 1, \dots, 5$ and with weights $w_{it}$ simulated from \begin{align*} w_{it}|a_i, b_i &\sim 
\text{Gamma}(a_i, b_i) \\ a_i &\sim U(0, 100) \\ b_i &\sim U(0, 100). \end{align*} Strictly speaking, this is not a hierarchical model since the random variables $\alpha$ and $\beta$ are parallel rather than nested. Nevertheless, with some minor manual intervention, function \code{rcomphierarc} can simulate data from this model. First, one simulates the weights (in lexicographic order) with <>= set.seed(123) @ <>= wit <- rgamma(15, rep(runif(3, 0, 100), each = 5), rep(runif(3, 0, 100), each = 5)) @ Second, one calls \code{rcomphierarc} to simulate the frequency data. The key here consists in manually inserting the simulation of the shape and rate parameters of the gamma distribution in the model for $\Lambda_i$. Finally, wrapping the call to \code{rcomphierarc} in \code{frequency} will immediately yield the matrix of observations: <>= frequency(rcomphierarc(list(entity = 3, year = 5), expression(entity = rgamma(rgamma(1, 5, 5), rgamma(1, 25, 1)), year = rpois(weights * entity)), weights = wit)) @ \qed \end{example} One will find more examples of \code{rcomphierarc} usage in the \code{simulation} demo file. The function was used to simulate the data in \cite{Goulet_cfs}. %% References \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% coding: utf-8 %%% TeX-master: t %%% End: actuar/vignettes/actuar.Rnw0000644000176200001440000000503415147745722015530 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Introduction to actuar} %\VignettePackage{actuar} %\SweaveUTF8 \title{Introduction to \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} \begin{document} \maketitle \section{Introduction} \label{sec:introduction} \pkg{actuar} \citep{actuar} provides additional actuarial science functionality and support for heavy tailed distributions to the R statistical system. 
The project was officially launched in 2005 and is under active development. The current feature set of the package can be split into five main categories: additional probability distributions; loss distributions modeling; risk and ruin theory; simulation of compound hierarchical models; credibility theory. Furthermore, starting with version 3.0-0, \pkg{actuar} gives easy access to many of its underlying C workhorses through an API. As much as possible, the developers have tried to keep the ``user interface'' of the various functions of the package consistent. Moreover, the package follows the general R philosophy of working with model objects. This means that instead of merely returning, say, a vector of probabilities, many functions will return an object containing, among other things, the said probabilities. The object can then be manipulated at one's will using various extraction, summary or plotting functions. \section{Documentation} In addition to the help pages, \pkg{actuar} ships with extensive vignettes and demonstration scripts; run the following commands at the R prompt to obtain the list of each. <>= vignette(package = "actuar") demo(package = "actuar") @ \section{Collaboration and citation} If you use R or \pkg{actuar} for actuarial analysis, please cite the software in publications. For information on how to cite the software, use: <>= citation() citation("actuar") @ \section*{Acknowledgments} The package would not be at this stage of development without the stimulating contribution of Sébastien Auclair, Christophe Dutang, Nicholas Langevin, Xavier Milhaud, Tommy Ouellet and Louis-Philippe Pouliot. This research benefited from financial support from the Natural Sciences and Engineering Research Council of Canada and from the \emph{Chaire d'actuariat} (Actuarial Science Foundation) of Université Laval. 
\bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/vignettes/coverage.Rnw0000644000176200001440000002332215147745722016044 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Complete formulas used by coverage} %\VignettePackage{actuar} %\SweaveUTF8 \title{Complete formulas used by \code{coverage}} \author{Vincent Goulet \\ Université Laval} \date{} <>= library(actuar) @ \begin{document} \maketitle Function \code{coverage} of \pkg{actuar} defines a new function to compute the probability density function (pdf) or cumulative distribution function (cdf) of any probability law under the following insurance coverage modifications: ordinary or franchise deductible, limit, coinsurance, inflation. In addition, the function can return the distribution of either the payment per loss or the payment per payment random variable. This terminology refers to whether or not the insurer knows that a loss occurred. For the exact definitions of the terms as used by \code{coverage}, see Chapter~5 of \cite{LossModels2e}. In the presence of a deductible, four random variables can be defined: \begin{enumerate} \item $Y^P$, the payment per payment with an ordinary deductible; \item $Y^L$, the payment per loss with an ordinary deductible; \item $\tilde{Y}^P$, the payment per payment with a franchise deductible; \item $\tilde{Y}^L$, the payment per loss with a franchise deductible. \end{enumerate} The most common case in insurance applications is the distribution of the amount paid per payment with an ordinary deductible, $Y^P$. Hence, it is the default in \code{coverage}. When there is no deductible, all four random variables are equivalent. This document presents the definitions of the above four random variables and their corresponding cdf and pdf for a deductible $d$, a limit $u$, a coinsurance level $\alpha$ and an inflation rate $r$. An illustrative plot of each cdf and pdf is also included. 
In these plots, a dot indicates a probability mass at the given point. In definitions below, $X$ is the nonnegative random variable of the losses with cdf $F_X(\cdot)$ and pdf $f_X(\cdot)$. \bibliography{actuar} <>= deductible <- 5 limit <- 13 @ \section{Payment per payment, ordinary deductible} <>= pgammaL <- coverage(cdf = pgamma, deductible = deductible, limit = limit, per.loss = TRUE) dgammaL <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, per.loss = TRUE) pgammaP <- coverage(cdf = pgamma, deductible = deductible, limit = limit) dgammaP <- coverage(dgamma, pgamma, deductible = deductible, limit = limit) d <- deductible u <- limit - d e <- 0.001 ylim <- c(0, dgammaL(0, 5, 0.6)) @ \begin{align*} Y^P &= \begin{cases} \alpha ((1 + r) X - d), & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha (u - d), & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{Y^P}(y) &= \begin{cases} 0, & y = 0 \\ \D\frac{F_X \left( \frac{y + \alpha d}{\alpha (1 + r)} \right) - F_X \left( \frac{d}{1 + r} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & 0 < y < \alpha (u - d) \\ 1, & y \geq \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaP(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaP(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \\ f_{Y^P}(y) &= \begin{cases} 0, & y = 0 \\ \left( \D\frac{1}{\alpha (1 + r)} \right) \D\frac{f_X \left( \frac{y + \alpha d}{\alpha(1 + r)} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & 0 < y < \alpha (u - d) \\ \D\frac{1 - F_X \Big( \frac{u}{1 + r} \Big)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & y = \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaP(x, 5, 0.6), from = 0 + e, to = u - e, xlim = c(0, limit), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) points(u, dgammaP(u, 
5, 0.6), pch = 16) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \end{align*} \section{Payment per loss, ordinary deductible} \begin{align*} Y^L &= \begin{cases} 0, & X < \D \frac{d}{1 + r} \\ \alpha ((1 + r) X - d), & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha (u - d), & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{Y^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & y = 0 \\ F_X \left( \D\frac{y + \alpha d}{\alpha(1 + r)} \right), & 0 < y < \alpha (u - d) \\ 1, & y \geq \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaL(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaL(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \\ f_{Y^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & y = 0 \\ \D\frac{1}{\alpha (1 + r)} f_X \left( \D\frac{y + \alpha d}{\alpha(1 + r)} \right), & 0 < y < \alpha (u - d) \\ 1 - F_X \left( \D\frac{u}{1 + r} \right), & y = \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = u - e, xlim = c(0, limit), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) points(c(0, u), dgammaL(c(0, u), 5, 0.6), pch = 16) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \end{align*} \section{Payment per payment, franchise deductible} <>= pgammaL <- coverage(cdf = pgamma, deductible = deductible, limit = limit, per.loss = TRUE, franchise = TRUE) dgammaL <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, per.loss = TRUE, franchise = TRUE) pgammaP <- coverage(cdf = pgamma, deductible = deductible, limit = limit, franchise = TRUE) dgammaP <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, franchise = TRUE) d <- deductible u <- limit e <- 0.001 ylim <- c(0, dgammaL(0, 5, 0.6)) @ \begin{align*} 
\tilde{Y}^P &= \begin{cases} \alpha (1 + r) X, & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha u, & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{\tilde{Y}^P}(y) &= \begin{cases} 0, & 0 \leq y \leq \alpha d \\ \D\frac{F_X \left( \frac{y}{\alpha (1 + r)} \right) - F_X \left( \frac{d}{1 + r} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & \alpha d < y < \alpha u \\ 1, & y \geq \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaP(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit + d), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaP(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \\ f_{\tilde{Y}^P}(y) &= \begin{cases} 0, & 0 \leq y \leq \alpha d \\ \left( \D\frac{1}{\alpha (1 + r)} \right) \D\frac{f_X \left( \frac{y}{\alpha(1 + r)} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & \alpha d < y < \alpha u \\ \D\frac{1 - F_X \Big( \frac{u}{1 + r} \Big)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & y = \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaP(x, 5, 0.6), from = d + e, to = u - e, xlim = c(0, limit + d), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = d, add = TRUE, lwd = 2) points(u, dgammaP(u, 5, 0.6), pch = 16) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \end{align*} \section{Payment per loss, franchise deductible} \begin{align*} \tilde{Y}^L &= \begin{cases} 0, & X < \D \frac{d}{1 + r} \\ \alpha (1 + r) X, & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha u, & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{\tilde{Y}^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & 0 \leq y \leq \alpha d \\ F_X \left( \D\frac{y}{\alpha(1 + r)} \right), & \alpha d < y < \alpha u \\ 1, & y \geq \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) 
curve(pgammaL(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit + d), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaL(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \\ f_{\tilde{Y}^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & y = 0 \\ \D\frac{1}{\alpha (1 + r)} f_X \left( \D\frac{y}{\alpha(1 + r)} \right), & \alpha d < y < \alpha u \\ 1 - F_X \left( \D\frac{u}{1 + r} \right), & y = \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaL(x, 5, 0.6), from = d + e, to = u - e, xlim = c(0, limit + d), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = d, add = TRUE, lwd = 2) points(c(0, u), dgammaL(c(0, u), 5, 0.6), pch = 16) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \end{align*} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/vignettes/risk.Rnw0000644000176200001440000007576715147745722015245 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Risk and ruin theory} %\VignettePackage{actuar} %\SweaveUTF8 \title{Risk and ruin theory features of \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} %% Additional math commands \newcommand{\VaR}{\mathrm{VaR}} \newcommand{\CTE}{\mathrm{CTE}} <>= library(actuar) options(width = 52, digits = 4) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} Risk theory refers to a body of techniques to model and measure the risk associated with a portfolio of insurance contracts. A first approach consists in modeling the distribution of total claims over a fixed period of time using the classical collective model of risk theory. 
A second input of interest to the actuary is the evolution of the surplus of the insurance company over many periods of time. In \emph{ruin theory}, the main quantity of interest is the probability that the surplus becomes negative, in which case technical ruin of the insurance company occurs. The interested reader can read more on these subjects in \cite{LossModels4e,Gerber_MRT,DenuitCharpentier1,MART:2e}, among others. The current version of \pkg{actuar} \citep{actuar} contains four visible functions related to the above problems: two for the calculation of the aggregate claim amount distribution and two for ruin probability calculations. \section{The collective risk model} \label{sec:collective-risk-model} Let random variable $S$ represent the aggregate claim amount (or total amount of claims) of a portfolio of independent risks over a fixed period of time, random variable $N$ represent the number of claims (or frequency) in the portfolio over that period, and random variable $C_j$ represent the amount of claim $j$ (or severity). Then, we have the random sum \begin{equation} \label{eq:definition-S} S = C_1 + \dots + C_N, \end{equation} where we assume that $C_1, C_2, \dots$ are mutually independent and identically distributed random variables each independent of $N$. The task at hand consists in evaluating numerically the cdf of $S$, given by \begin{align} \label{eq:cdf-S} F_S(x) &= \Pr[S \leq x] \notag \\ &= \sum_{n = 0}^\infty \Pr[S \leq x|N = n] p_n \notag \\ &= \sum_{n = 0}^\infty F_C^{*n}(x) p_n, \end{align} where $F_C(x) = \Pr[C \leq x]$ is the common cdf of $C_1, \dots, C_n$, $p_n = \Pr[N = n]$ and $F_C^{*n}(x) = \Pr[C_1 + \dots + C_n \leq x]$ is the $n$-fold convolution of $F_C(\cdot)$. 
If $C$ is discrete on $0, 1, 2, \dots$, one has \begin{equation} \label{eq:convolution-formula} F_C^{*k}(x) = \begin{cases} I\{x \geq 0\}, & k = 0 \\ F_C(x), & k = 1 \\ \sum_{y = 0}^x F_C^{*(k - 1)}(x - y) f_C(y), & k = 2, 3, \dots, \end{cases} \end{equation} where $I\{\mathcal{A}\} = 1$ if $\mathcal{A}$ is true and $I\{\mathcal{A}\} = 0$ otherwise. \section{Discretization of claim amount distributions} \label{sec:discretization} Some numerical techniques to compute the aggregate claim amount distribution (see \autoref{sec:aggregate}) require a discrete arithmetic claim amount distribution; that is, a distribution defined on $0, h, 2h, \dots$ for some step (or span, or lag) $h$. The package provides function \code{discretize} to discretize a continuous distribution. (The function can also be used to modify the support of an already discrete distribution, but this requires additional care.) Let $F(x)$ denote the cdf of the distribution to discretize on some interval $(a, b)$ and $f_x$ denote the probability mass at $x$ in the discretized distribution. Currently, \code{discretize} supports the following four discretization methods. \begin{enumerate} \item Upper discretization, or forward difference of $F(x)$: \begin{equation} \label{eq:discretization:upper} f_x = F(x + h) - F(x) \end{equation} for $x = a, a + h, \dots, b - h$. The discretized cdf is always above the true cdf. \item Lower discretization, or backward difference of $F(x)$: \begin{equation} \label{eq:discretization:lower} f_x = \begin{cases} F(a), & x = a \\ F(x) - F(x - h), & x = a + h, \dots, b. \end{cases} \end{equation} The discretized cdf is always under the true cdf. \item Rounding of the random variable, or the midpoint method: \begin{equation} \label{eq:discretization:midpoint} f_x = \begin{cases} F(a + h/2), & x = a \\ F(x + h/2) - F(x - h/2), & x = a + h, \dots, b - h. \end{cases} \end{equation} The true cdf passes exactly midway through the steps of the discretized cdf. 
\item Unbiased, or local matching of the first moment method: \begin{equation} \label{eq:discretization:unbiased} f_x = \begin{cases} \dfrac{\E{X \wedge a} - \E{X \wedge a + h}}{h} + 1 - F(a), & x = a \\ \dfrac{2 \E{X \wedge x} - \E{X \wedge x - h} - \E{X \wedge x + h}}{h}, & a < x < b \\ \dfrac{\E{X \wedge b} - \E{X \wedge b - h}}{h} - 1 + F(b), & x = b. \end{cases} \end{equation} The discretized and the true distributions have the same total probability and expected value on $(a, b)$. \end{enumerate} \autoref{fig:discretization-methods} illustrates the four methods. It should be noted that although very close in this example, the rounding and unbiased methods are not identical. \begin{figure}[t] \centering <>= fu <- discretize(plnorm(x), method = "upper", from = 0, to = 5) fl <- discretize(plnorm(x), method = "lower", from = 0, to = 5) fr <- discretize(plnorm(x), method = "rounding", from = 0, to = 5) fb <- discretize(plnorm(x), method = "unbiased", from = 0, to = 5, lev = levlnorm(x)) par(mfrow = c(2, 2), mar = c(5, 2, 4, 2)) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Upper", ylab = "F(x)") plot(stepfun(0:4, diffinv(fu)), pch = 20, add = TRUE) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Lower", ylab = "F(x)") plot(stepfun(0:5, diffinv(fl)), pch = 20, add = TRUE) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Rounding", ylab = "F(x)") plot(stepfun(0:4, diffinv(fr)), pch = 20, add = TRUE) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Unbiased", ylab = "F(x)") plot(stepfun(0:5, diffinv(fb)), pch = 20, add = TRUE) ## curve(plnorm(x), from = 0, to = 5, lwd = 2, ylab = "F(x)") ## par(col = "blue") ## plot(stepfun(0:4, diffinv(fu)), pch = 19, add = TRUE) ## par(col = "red") ## plot(stepfun(0:5, diffinv(fl)), pch = 19, add = TRUE) ## par(col = "green") ## plot(stepfun(0:4, diffinv(fr)), pch = 19, add = TRUE) ## par(col = "magenta") ## plot(stepfun(0:5, diffinv(fb)), pch = 19, add = TRUE) ## legend(3, 0.3, legend = c("upper", "lower", 
"rounding", "unbiased"), ## col = c("blue", "red", "green", "magenta"), lty = 1, pch = 19, ## text.col = "black") @ \caption{Comparison of four discretization methods} \label{fig:discretization-methods} \end{figure} Usage of \code{discretize} is similar to R's plotting function \code{curve}. The cdf to discretize and, for the unbiased method only, the limited expected value function are passed to \code{discretize} as expressions in \code{x}. The other arguments are the upper and lower bounds of the discretization interval, the step $h$ and the discretization method. For example, upper and unbiased discretizations of a Gamma$(2, 1)$ distribution on $(0, 17)$ with a step of $0.5$ are achieved with, respectively, <>= fx <- discretize(pgamma(x, 2, 1), method = "upper", from = 0, to = 17, step = 0.5) fx <- discretize(pgamma(x, 2, 1), method = "unbiased", lev = levgamma(x, 2, 1), from = 0, to = 17, step = 0.5) @ Function \code{discretize} is written in a modular fashion making it simple to add other discretization methods if needed. \section{Calculation of the aggregate claim amount distribution} \label{sec:aggregate} Function \code{aggregateDist} serves as a unique front end for various methods to compute or approximate the cdf of the aggregate claim amount random variable $S$. Currently, five methods are supported. \begin{enumerate} \item Recursive calculation using the algorithm of \cite{Panjer_81}. This requires the severity distribution to be discrete arithmetic on $0, 1, 2, \dots, m$ for some monetary unit and the frequency distribution to be a member of either the $(a, b, 0)$ or $(a, b, 1)$ class of distributions \citep{LossModels4e}. (These classes contain the Poisson, binomial, negative binomial and logarithmic distributions and their zero-truncated and zero-modified extensions allowing for a zero or arbitrary mass at $x = 0$.) 
The general recursive formula is: \begin{displaymath} f_S(x) = \frac{(p_1 - (a + b)p_0)f_C(x) + \sum_{y=1}^{\min(x, m)}(a + by/x)f_C(y)f_S(x - y)}{1 - a f_C(0)}, \end{displaymath} with starting value $f_S(0) = P_N(f_C(0))$, where $P_N(\cdot)$ is the probability generating function of $N$. Probabilities are computed until their sum is arbitrarily close to 1. The recursions are done in C to dramatically increase speed. One difficulty the programmer is facing is the unknown length of the output. This was solved using a common, simple and fast technique: first allocate an arbitrary amount of memory and double this amount each time the allocated space gets full. \item Exact calculation by numerical convolutions using \eqref{eq:cdf-S} and \eqref{eq:convolution-formula}. This also requires a discrete severity distribution. However, there is no restriction on the shape of the frequency distribution. The package merely implements the sum \eqref{eq:cdf-S}, the convolutions being computed with R's function \code{convolve}, which in turn uses the Fast Fourier Transform. This approach is practical for small problems only, even on today's fast computers. \item Normal approximation of the cdf, that is \begin{equation} \label{eq:normal-approximation} F_S(x) \approx \Phi \left( \frac{x - \mu_S}{\sigma_S} \right), \end{equation} where $\mu_S = \E{S}$ and $\sigma_S^2 = \VAR{S}$. For most realistic models, this approximation is rather crude in the tails of the distribution. \item Normal Power II approximation of the cdf, that is \begin{equation} \label{eq:np2-approximation} F_S(x) \approx \Phi \left( -\frac{3}{\gamma_S} + \sqrt{\frac{9}{\gamma_S^2} + 1 + \frac{6}{\gamma_S} \frac{x - \mu_S}{\sigma_S}} \right), \end{equation} where $\gamma_S = \E{(S - \mu_S)^3}/\sigma_S^3$ is the skewness of $S$. The approximation is valid for $x > \mu_S$ only and performs reasonably well when $\gamma_S < 1$. See \cite{Daykin_et_al} for details. 
\item Simulation of a random sample from $S$ and approximation of $F_S(x)$ by the empirical cdf \begin{equation} F_n(x) = \frac{1}{n} \sum_{j = 1}^n I\{x_j \leq x\}. \end{equation} The simulation itself is done with function \code{simul} (see the \code{"simulation"} vignette). This function admits very general hierarchical models for both the frequency and the severity components. \end{enumerate} Here also, adding other methods to \code{aggregateDist} is simple due to its modular conception. The arguments of \code{aggregateDist} differ according to the chosen calculation method; see the help page for details. One interesting argument to note is \code{x.scale} to specify the monetary unit of the severity distribution. This way, one does not have to mentally do the conversion between the support of $0, 1, 2, \dots$ assumed by the recursive and convolution methods, and the true support of $S$. The recursive method fails when the expected number of claims is so large that $f_S(0)$ is numerically equal to zero. One solution proposed by \citet{LossModels4e} consists in dividing the appropriate parameter of the frequency distribution by $2^n$, with $n$ such that $f_S(0) > 0$ and the recursions can start. One then computes the aggregate claim amount distribution using the recursive method and then convolves the resulting distribution $n$ times with itself to obtain the final distribution. Function \code{aggregateDist} supports this procedure through its argument \code{convolve}. A common problem with the recursive method is failure to obtain a cumulative distribution function that reaches (close to) $1$. This is usually due to too coarse a discretization of the severity distribution. One should make sure to use a small enough discretization step and to discretize the severity distribution far in the right tail. The function \code{aggregateDist} returns an object of class \code{"aggregateDist"} inheriting from the \code{"function"} class. 
Thus, one can use the object as a function to compute the value of $F_S(x)$ in any $x$. For illustration purposes, consider the following model: the distribution of $S$ is a compound Poisson with parameter $\lambda = 10$ and severity distribution Gamma$(2, 1)$. To obtain an approximation of the cdf of $S$ we first discretize the gamma distribution on $(0, 22)$ with the unbiased method and a step of $0.5$, and then use the recursive method in \code{aggregateDist}: <>= fx <- discretize(pgamma(x, 2, 1), method = "unbiased", from = 0, to = 22, step = 0.5, lev = levgamma(x, 2, 1)) Fs <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 10, x.scale = 0.5) summary(Fs) @ Although useless here, the following is essentially equivalent, except in the far right tail for numerical reasons: <>= Fsc <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 5, convolve = 1, x.scale = 0.5) summary(Fsc) @ We return to object \code{Fs}. It contains an empirical cdf with support <>= knots(Fs) @ A nice graph of this function is obtained with a method of \code{plot} (see \autoref{fig:Fs}): <>= plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) @ \begin{figure}[t] \centering <>= plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) @ \caption{Graphic of the empirical cdf of $S$ obtained with the recursive method} \label{fig:Fs} \end{figure} The package defines a few summary methods to extract information from \code{"aggregateDist"} objects. First, there are methods of \code{mean} and \code{quantile} to easily compute the mean and obtain the quantiles of the approximate distribution: <>= mean(Fs) quantile(Fs) quantile(Fs, 0.999) @ Second, a method of \texttt{diff} gives easy access to the underlying probability mass function: <>= diff(Fs) @ Of course, this is defined (and makes sense) for the recursive, direct convolution and simulation methods only. 
Third, the package introduces the generic functions \code{VaR} and \code{CTE} (with alias \code{TVaR}) with methods for objects of class \code{"aggregateDist"}. The former computes the value-at-risk $\VaR_\alpha$ such that \begin{equation} \label{eq:VaR} \Pr[S \leq \VaR_\alpha] = \alpha, \end{equation} where $\alpha$ is the confidence level. Thus, the value-at-risk is nothing else than a quantile. As for the method of \code{CTE}, it computes the conditional tail expectation (also called Tail Value-at-Risk) \begin{equation} \label{eq:CTE} \CTE_\alpha = \E{S|S > \VaR_\alpha}. \end{equation} Here are examples using object \code{Fs} obtained above: <>= VaR(Fs) CTE(Fs) @ To conclude on the subject, \autoref{fig:Fs-comparison} shows the cdf of $S$ using five of the many combinations of discretization and calculation method supported by \pkg{actuar}. \begin{figure}[t] \centering <>= fx.u <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "upper") Fs.u <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.u, lambda = 10, x.scale = 0.5) fx.l <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "lower") Fs.l <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.l, lambda = 10, x.scale = 0.5) Fs.n <- aggregateDist("normal", moments = c(20, 60)) Fs.s <- aggregateDist("simulation", model.freq = expression(y = rpois(10)), model.sev = expression(y = rgamma(2, 1)), nb.simul = 10000) par(col = "black") plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60), sub = "") par(col = "blue") plot(Fs.u, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "red") plot(Fs.l, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "green") plot(Fs.s, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "magenta") plot(Fs.n, add = TRUE, sub = "") legend(30, 0.4, c("recursive + unbiased", "recursive + upper", "recursive + lower", "simulation", "normal approximation"), col = 
c("black", "blue", "red", "green", "magenta"), lty = 1, text.col = "black") @ \caption{Comparison between the empirical or approximate cdf of $S$ obtained with five different methods} \label{fig:Fs-comparison} \end{figure} \section{The continuous time ruin model} \label{sec:ruin-model} We now turn to the multi-period ruin problem. Let $U(t)$ denote the surplus of an insurance company at time $t$, $c(t)$ denote premiums collected through time $t$, and $S(t)$ denote aggregate claims paid through time $t$. If $u$ is the initial surplus at time $t = 0$, then a mathematically convenient definition of $U(t)$ is \begin{equation} \label{eq:definition-surplus} U(t) = u + c(t) - S(t). \end{equation} As mentioned previously, technical ruin of the insurance company occurs when the surplus becomes negative. Therefore, the definition of the infinite time probability of ruin is \begin{equation} \label{eq:definition-ruin} \psi(u) = \Pr[U(t) < 0 \text{ for some } t \geq 0]. \end{equation} We define some other quantities needed in the sequel. Let $N(t)$ denote the number of claims up to time $t \geq 0$ and $C_j$ denote the amount of claim $j$. Then the definition of $S(t)$ is analogous to \eqref{eq:definition-S}: \begin{equation} \label{eq:definition-S(t)} S(t) = C_1 + \dots + C_{N(t)}, \end{equation} assuming $N(0) = 0$ and $S(t) = 0$ as long as $N(t) = 0$. Furthermore, let $T_j$ denote the time when claim $j$ occurs, such that $T_1 < T_2 < T_3 < \dots$ Then the random variable of the interarrival (or wait) time between claim $j - 1$ and claim $j$ is defined as $W_1 = T_1$ and \begin{equation} \label{eq:definition-wait} W_j = T_j - T_{j - 1}, \quad j \geq 2. 
\end{equation} For the rest of this discussion, we make the following assumptions: \begin{enumerate} \item premiums are collected at a constant rate $c$, hence $c(t) = ct$; \item the sequence $\{T_j\}_{j \geq 1}$ forms an ordinary renewal process, with the consequence that random variables $W_1, W_2, \dots$ are independent and identically distributed; \item claim amounts $C_1, C_2, \dots$ are independent and identically distributed. \end{enumerate} \section{Adjustment coefficient} \label{sec:adjustment-coefficient} The quantity known as the adjustment coefficient $\rho$ hardly has any physical interpretation, but it is useful as an approximation to the probability of ruin since we have the inequality \begin{displaymath} \psi(u) \leq e^{-\rho u}, \quad u \geq 0. \end{displaymath} The adjustment coefficient is defined as the smallest strictly positive solution (if it exists) of the Lundberg equation \begin{equation} \label{eq:definition-adjcoef} h(t) = \E{e^{t C - t c W}} = 1, \end{equation} where the premium rate $c$ satisfies the positive safety loading constraint $\E{C - cW} < 0$. If $C$ and $W$ are independent, as in the most common models, then the equation can be rewritten as \begin{equation} \label{eq:definition-adjcoef-ind} h(t) = M_C(t) M_W(-tc) = 1. \end{equation} Function \code{adjCoef} of \pkg{actuar} computes the adjustment coefficient $\rho$ from the following arguments: either the two moment generating functions $M_C(t)$ and $M_W(t)$ (thereby assuming independence) or else function $h(t)$; the premium rate $c$; the upper bound of the support of $M_C(t)$ or any other upper bound for $\rho$. 
For example, if $W$ and $C$ are independent and each follow an exponential distribution, $W$ with parameter $2$ and $C$ with parameter $1$, and the premium rate is $c = 2.4$ (for a safety loading of 20\% using the expected value premium principle), then the adjustment coefficient is <>= adjCoef(mgf.claim = mgfexp(x), mgf.wait = mgfexp(x, 2), premium.rate = 2.4, upper = 1) @ The function also supports models with proportional or excess-of-loss reinsurance \citep{Centeno_02}. Under the first type of treaty, an insurer pays a proportion $\alpha$ of every loss and the rest is paid by the reinsurer. Then, for fixed $\alpha$ the adjustment coefficient is the solution of \begin{equation} \label{eq:definition-adjcoef-prop} h(t) = \E{e^{t \alpha C - t c(\alpha) W}} = 1. \end{equation} Under an excess-of-loss treaty, the primary insurer pays each claim up to a limit $L$. Again, for fixed $L$, the adjustment coefficient is the solution of \begin{equation} \label{eq:definition-adjcoef-xl} h(t) = \E{e^{t \min(C, L) - t c(L) W}} = 1. \end{equation} For models with reinsurance, \code{adjCoef} returns an object of class \code{"adjCoef"} inheriting from the \code{"function"} class. One can then use the object to compute the adjustment coefficient for any retention rate $\alpha$ or retention limit $L$. The package also defines a method of \code{plot} for these objects. 
For example, using the same assumptions as above with proportional reinsurance and a 30\% safety loading for the reinsurer, the adjustment coefficient as a function of $\alpha \in [0, 1]$ is (see \autoref{fig:adjcoef} for the graph): <>= mgfx <- function(x, y) mgfexp(x * y) p <- function(x) 2.6 * x - 0.2 rho <- adjCoef(mgfx, mgfexp(x, 2), premium = p, upper = 1, reins = "prop", from = 0, to = 1) rho(c(0.75, 0.8, 0.9, 1)) plot(rho) @ \begin{figure}[t] \centering <>= plot(rho) @ \caption{Adjustment coefficient as a function of the retention rate} \label{fig:adjcoef} \end{figure} \section{Probability of ruin} \label{sec:ruin} In this subsection, we always assume that interarrival times and claim amounts are independent. The main difficulty with the calculation of the infinite time probability of ruin lies in the lack of explicit formulas except for the most simple models. If interarrival times are Exponential$(\lambda)$ distributed (Poisson claim number process) and claim amounts are Exponential$(\beta)$ distributed, then \begin{equation} \label{eq:ruin-cramer-lundberg} \psi(u) = \frac{\lambda}{c \beta}\, e^{-(\beta - \lambda/c) u}. \end{equation} If the frequency assumption of this model is defensible, the severity assumption can hardly be used beyond illustration purposes. Fortunately, phase-type distributions have come to the rescue since the early 1990s. 
\cite{AsmussenRolski_91} first show that in the classical Cramér--Lundberg model where interarrival times are Exponential$(\lambda)$ distributed, if claim amounts are Phase-type$(\mat{\pi}, \mat{T})$ distributed, then $\psi(u) = 1 - F(u)$, where $F$ is Phase-type$(\mat{\pi}_+, \mat{Q})$ with \begin{equation} \label{eq:prob-ruin:cramer-lundberg} \begin{split} \mat{\pi}_+ &= - \frac{\lambda}{c}\, \mat{\pi} \mat{T}^{-1} \\ \mat{Q} &= \mat{T} + \mat{t} \mat{\pi}_+, \end{split} \end{equation} and $\mat{t} = -\mat{T} \mat{e}$, $\mat{e}$ is a column vector with all components equal to 1; see the \code{"lossdist"} vignette for details. In the more general Sparre~Andersen model where interarrival times can have any Phase-type$(\mat{\nu}, \mat{S})$ distribution, \cite{AsmussenRolski_91} also show that using the same claim severity assumption as above, one still has $\psi(u) = 1 - F(u)$ where $F$ is Phase-type$(\mat{\pi}_+, \mat{Q})$, but with parameters \begin{equation} \label{eq:prob-ruin:sparre:pi+} \mat{\pi}_+ = \frac{\mat{e}^\prime (\mat{Q} - \mat{T})}{% c \mat{e}^\prime \mat{t}} \end{equation} and $\mat{Q}$ solution of \begin{equation} \label{eq:eq:prob-ruin:sparre:Q} \begin{split} \mat{Q} &= \Psi(\mat{Q}) \\ &= \mat{T} - \mat{t} \mat{\pi} \left[ (\mat{I}_n \otimes \mat{\nu}) (\mat{Q} \oplus \mat{S})^{-1} (\mat{I}_n \otimes \mat{s}) \right]. \end{split} \end{equation} In the above, $\mat{s} = -\mat{S} \mat{e}$, $\mat{I}_n$ is the $n \times n$ identity matrix, $\otimes$ denotes the usual Kronecker product between two matrices and $\oplus$ is the Kronecker sum defined as \begin{equation} \label{eq:kronecker-sum} \mat{A}_{m \times m} \oplus \mat{B}_{n \times n} = \mat{A} \otimes \mat{I}_n + \mat{B} \otimes \mat{I}_m. \end{equation} Function \code{ruin} of \pkg{actuar} returns a function object of class \code{"ruin"} to compute the probability of ruin for any initial surplus $u$. 
In all cases except the exponential/exponential model where \eqref{eq:ruin-cramer-lundberg} is used, the output object calls function \code{pphtype} to compute the ruin probabilities. Some thought went into the interface of \code{ruin}. Obviously, all models can be specified using phase-type distributions, but the authors wanted users to have easy access to the most common models involving exponential and Erlang distributions. Hence, one first states the claim amount and interarrival times models with any combination of \code{"exponential"}, \code{"Erlang"} and \code{"phase-type"}. Then, one passes the parameters of each model using lists with components named after the corresponding parameters of \code{dexp}, \code{dgamma} and \code{dphtype}. If a component \code{"weights"} is found in a list, the model is a mixture of exponential or Erlang (mixtures of phase-type are not supported). Every component of the parameter lists is recycled as needed. The following examples should make the matter clearer. (All examples use $c = 1$, the default value in \code{ruin}.) 
First, for the exponential/exponential model, one has <>= psi <- ruin(claims = "e", par.claims = list(rate = 5), wait = "e", par.wait = list(rate = 3)) psi psi(0:10) @ Second, for a mixture of two exponentials claim amount model and exponential interarrival times, the simplest call to \code{ruin} is <>= op <- options(width=50) @ <>= ruin(claims = "e", par.claims = list(rate = c(3, 7), weights = 0.5), wait = "e", par.wait = list(rate = 3)) @ Finally, one will obtain a function to compute ruin probabilities in a model with phase-type claim amounts and mixture of exponentials interarrival times with <>= prob <- c(0.5614, 0.4386) rates <- matrix(c(-8.64, 0.101, 1.997, -1.095), 2, 2) ruin(claims = "p", par.claims = list(prob = prob, rates = rates), wait = "e", par.wait = list(rate = c(5, 1), weights = c(0.4, 0.6))) @ To ease plotting of the probability of ruin function, the package provides a method of \code{plot} for objects returned by \code{ruin} that is a simple wrapper for \code{curve} (see \autoref{fig:prob-ruin}): <>= psi <- ruin(claims = "p", par.claims = list(prob = prob, rates = rates), wait = "e", par.wait = list(rate = c(5, 1), weights = c(0.4, 0.6))) plot(psi, from = 0, to = 50) @ <>= options(op) @ \begin{figure}[t] \centering <>= plot(psi, from = 0, to = 50) @ \caption{Graphic of the probability of ruin as a function of the initial surplus $u$} \label{fig:prob-ruin} \end{figure} \section{Approximation to the probability of ruin} \label{sec:beekman} When the model for the aggregate claim process \eqref{eq:definition-S(t)} does not fit nicely into the framework of the previous section, one can compute ruin probabilities using the so-called Beekman's convolution formula \citep{Beekman_68,BeekmanFormula_EAS}. Let the surplus process and the aggregate claim amount process be defined as in \eqref{eq:definition-surplus} and \eqref{eq:definition-S(t)}, respectively, and let $\{N(t)\}$ be a Poisson process with mean $\lambda$. 
As before, claim amounts $C_1, C_2, \dots$ are independent and identically distributed with cdf $P(\cdot)$ and mean $\mu = \E{C_1}$. Then the infinite time probability of ruin is given by \begin{equation} \label{eq:beekman:prob-ruin} \psi(u) = 1 - F(u), \end{equation} where $F(\cdot)$ is Compound~Geometric$(p, H)$ with \begin{equation} \label{eq:beekman:p} p = 1 - \frac{\lambda \mu}{c} \end{equation} and \begin{equation} \label{eq:beekman:H} H(x) = \int_0^x \frac{1 - P(y)}{\mu}\, dy. \end{equation} In other words, we have (compare with \eqref{eq:cdf-S}): \begin{equation} \label{eq:beekman:prob-ruin-long} \psi(u) = 1 - \sum_{n = 0}^\infty H^{*n}(u) p (1 - p)^n. \end{equation} In most practical situations, numerical evaluation of \eqref{eq:beekman:prob-ruin-long} is done using Panjer's recursive formula. This usually requires discretization of $H(\cdot)$. In such circumstances, Beekman's formula yields approximate ruin probabilities. For example, let claim amounts have a Pareto$(5, 4)$ distribution, that is \begin{displaymath} P(x) = 1 - \left( \frac{4}{4 + x} \right)^5 \end{displaymath} and $\mu = 1$. Then \begin{align*} H(x) &= \int_0^x \left( \frac{4}{4 + y} \right)^5 dy \\ &= 1 - \left( \frac{4}{4 + x} \right)^4, \end{align*} or else $H$ is Pareto$(4, 4)$. Furthermore, we determine the premium rate $c$ with the expected value premium principle and a safety loading of 20\%, that is $c = 1.2 \lambda \mu$. Thus, $p = 0.2/1.2 = 1/6$. 
One can get functions to compute lower bounds and upper bounds for $F(u)$ with functions \code{discretize} and \code{aggregateDist} as follows: <>= f.L <- discretize(ppareto(x, 4, 4), from = 0, to = 200, step = 1, method = "lower") f.U <- discretize(ppareto(x, 4, 4), from = 0, to = 200, step = 1, method = "upper") F.L <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = f.L, prob = 1/6) F.U <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = f.U, prob = 1/6) @ Corresponding functions for the probability of ruin $\psi(u)$ lower and upper bounds are (see \autoref{fig:beekman:prob-ruin} for the graphic): <>= psi.L <- function(u) 1 - F.U(u) psi.U <- function(u) 1 - F.L(u) u <- seq(0, 50, by = 5) cbind(lower = psi.L(u), upper = psi.U(u)) curve(psi.L, from = 0, to = 100, col = "blue") curve(psi.U, add = TRUE, col = "green") @ \begin{figure}[t] \centering <>= curve(psi.L, from = 0, to = 100, col = "blue") curve(psi.U, add = TRUE, col = "green") @ \caption{Lower and upper bounds for the probability of ruin as determined using Beekman's convolution formula.} \label{fig:beekman:prob-ruin} \end{figure} One can make the bounds as close as one wishes by reducing the discretization step. 
\bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/vignettes/modeling.Rnw0000644000176200001440000005150315147745722016051 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Loss distributions modeling} %\VignettePackage{actuar} %\SweaveUTF8 \title{Loss modeling features of \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} <>= library(actuar) options(width = 52, digits = 4) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} One important task of actuaries is the modeling of claim amount and claim count distributions for ratemaking, loss reserving or other risk evaluation purposes. Package \pkg{actuar} features many support functions for loss distributions modeling: \begin{enumerate} \item support for heavy tail continuous distributions useful in loss severity modeling; \item support for phase-type distributions for ruin theory; \item functions to compute raw moments, limited moments and the moment generating function (when it exists) of continuous distributions; \item support for zero-truncated and zero-modified extensions of the discrete distributions commonly used in loss frequency modeling; \item extensive support of grouped data; \item functions to compute empirical raw and limited moments; \item support for minimum distance estimation using three different measures; \item treatment of coverage modifications (deductibles, limits, inflation, coinsurance). \end{enumerate} Vignette \code{"distributions"} covers the points 1--4 above in great detail. This document concentrates on points 5--8. \section{Grouped data} \label{sec:grouped-data} Grouped data is data represented in an interval-frequency manner. 
Typically, a grouped data set will report that there were $n_j$ claims in the interval $(c_{j - 1}, c_j]$, $j = 1, \dots, r$ (with the possibility that $c_r = \infty$). This representation is much more compact than an individual data set --- where the value of each claim is known --- but it also carries far less information. Now that storage space in computers has essentially become a non issue, grouped data has somewhat fallen out of fashion. Still, grouped data remains useful as a means to represent data, if only graphically --- for example, a histogram is nothing but a density approximation for grouped data. Moreover, various parameter estimation techniques rely on grouped data. For these reasons, \pkg{actuar} provides facilities to store, manipulate and summarize grouped data. A standard storage method is needed since there are many ways to represent grouped data in the computer: using a list or a matrix, aligning $n_j$ with $c_{j - 1}$ or with $c_j$, omitting $c_0$ or not, etc. With appropriate extraction, replacement and summary methods, manipulation of grouped data becomes similar to that of individual data. Function \code{grouped.data} creates a grouped data object similar to --- and inheriting from --- a data frame. The function accepts two types of input: \begin{enumerate} \item a vector of group boundaries $c_0, c_1, \dots, c_r$ and one or more vectors of group frequencies $n_1, \dots, n_r$ (note that there should be one more group boundary than group frequencies); \item individual data $x_1, \dots, x_n$ and either a vector of breakpoints $c_1, \dots, c_r$, a number $r$ of breakpoints or an algorithm to determine the latter. \end{enumerate} In the second case, \code{grouped.data} will group the individual data using function \code{hist}. The function always assumes that the intervals are contiguous. 
\begin{example} \label{ex:grouped.data-1} Consider the following already grouped data set: \begin{center} \begin{tabular}{lcc} \toprule Group & Frequency (Line 1) & Frequency (Line 2) \\ \midrule $(0, 25]$ & 30 & 26 \\ $(25, 50]$ & 31 & 33 \\ $(50, 100]$ & 57 & 31 \\ $(100, 150]$ & 42 & 19 \\ $(150, 250]$ & 65 & 16 \\ $(250, 500]$ & 84 & 11 \\ \bottomrule \end{tabular} \end{center} We can conveniently and unambiguously store this data set in R as follows: <>= x <- grouped.data(Group = c(0, 25, 50, 100, 150, 250, 500), Line.1 = c(30, 31, 57, 42, 65, 84), Line.2 = c(26, 33, 31, 19, 16, 11)) @ Internally, object \code{x} is a list with class <>= class(x) @ The package provides a suitable \code{print} method to display grouped data objects in an intuitive manner: <>= x @ \qed \end{example} \begin{example} \label{ex:grouped.data-2} Consider Data Set~B of \citet[Table~11.2]{LossModels4e}: \begin{center} \begin{tabular}{*{10}{r}} 27 & 82 & 115 & 126 & 155 & 161 & 243 & 294 & 340 & 384 \\ 457 & 680 & 855 & 877 & 974 & \np{1193} & \np{1340} & \np{1884} & \np{2558} & \np{15743} \end{tabular} \end{center} We can represent this data set as grouped data using either an automatic or a suggested number of groups (see \code{?hist} for details): <>= y <- c( 27, 82, 115, 126, 155, 161, 243, 294, 340, 384, 457, 680, 855, 877, 974, 1193, 1340, 1884, 2558, 15743) grouped.data(y) grouped.data(y, breaks = 5) @ The above grouping methods use equi-spaced breaks. This is rarely appropriate for heavily skewed insurance data. For this reason, \code{grouped.data} also supports specified breakpoints (or group boundaries): <>= grouped.data(y, breaks = c(0, 100, 200, 350, 750, 1200, 2500, 5000, 16000)) @ \qed \end{example} The package supports the most common extraction and replacement methods for \code{"grouped.data"} objects using the usual \code{[} and \code{[<-} operators. In particular, the following extraction operations are supported. 
(In the following, object \code{x} is the grouped data object of \autoref{ex:grouped.data-1}.) <>= x <- grouped.data(Group = c(0, 25, 50, 100, 150, 250, 500), Line.1 = c(30, 31, 57, 42, 65, 84), Line.2 = c(26, 33, 31, 19, 16, 11)) @ \begin{enumerate}[i)] \item Extraction of the vector of group boundaries (the first column): <>= x[, 1] @ \item Extraction of the vector or matrix of group frequencies (the second and third columns): <>= x[, -1] @ \item Extraction of a subset of the whole object (first three lines): <>= x[1:3, ] @ \end{enumerate} Notice how extraction results in a simple vector or matrix if either the group boundaries or the group frequencies are dropped. As for replacement operations, the package implements the following. \begin{enumerate}[i)] \item Replacement of one or more group frequencies: <>= x[1, 2] <- 22; x x[1, c(2, 3)] <- c(22, 19); x @ \item Replacement of the boundaries of one or more groups: <>= x[1, 1] <- c(0, 20); x x[c(3, 4), 1] <- c(55, 110, 160); x @ \end{enumerate} It is not possible to replace the boundaries and the frequencies simultaneously. The mean of grouped data is \begin{equation} \hat{\mu} = \frac{1}{n} \sum_{j = 1}^r a_j n_j, \end{equation} where $a_j = (c_{j - 1} + c_j)/2$ is the midpoint of the $j$th interval, and $n = \sum_{j = 1}^r n_j$, whereas the variance is \begin{equation} \frac{1}{n} \sum_{j = 1}^r n_j (a_j - \hat{\mu})^2. \end{equation} The standard deviation is the square root of the variance. The package defines methods to easily compute the above descriptive statistics: <>= mean(x) var(x) sd(x) @ Higher empirical moments can be computed with \code{emm}; see \autoref{sec:empirical-moments}. The R function \code{hist} splits individual data into groups and draws a histogram of the frequency distribution. The package introduces a method for already grouped data. 
Only the first frequencies column is considered (see \autoref{fig:histogram} for the resulting graph): <>= hist(x[, -3]) @ \begin{figure}[t] \centering <>= hist(x[, -3]) @ \caption{Histogram of a grouped data object} \label{fig:histogram} \end{figure} \begin{rem} One will note that for an individual data set like \code{y} of \autoref{ex:grouped.data-2}, the following two expressions yield the same result: <>= hist(y) hist(grouped.data(y)) @ \end{rem} R has a function \code{ecdf} to compute the empirical cdf $F_n(x)$ of an individual data set: \begin{equation} \label{eq:ecdf} F_n(x) = \frac{1}{n} \sum_{j = 1}^n I\{x_j \leq x\}, \end{equation} where $I\{\mathcal{A}\} = 1$ if $\mathcal{A}$ is true and $I\{\mathcal{A}\} = 0$ otherwise. The function returns a \code{"function"} object to compute the value of $F_n(x)$ in any $x$. The approximation of the empirical cdf for grouped data is called an ogive \citep{LossModels4e,HoggKlugman}. It is obtained by joining the known values of $F_n(x)$ at group boundaries with straight line segments: \begin{equation} \tilde{F}_n(x) = \begin{cases} 0, & x \leq c_0 \\ \dfrac{(c_j - x) F_n(c_{j-1}) + (x - c_{j-1}) F_n(c_j)}{% c_j - c_{j - 1}}, & c_{j-1} < x \leq c_j \\ 1, & x > c_r. \end{cases} \end{equation} The package includes a generic function \code{ogive} with methods for individual and for grouped data. The function behaves exactly like \code{ecdf}. \begin{example} \label{ex:ogive} Consider first the grouped data set of \autoref{ex:grouped.data-1}. 
Function \code{ogive} returns a function to compute the ogive $\tilde{F}_n(x)$ in any point: <>= (Fnt <- ogive(x)) @ Methods for functions \code{knots} and \code{plot} allow, respectively, to obtain the knots $c_0, c_1, \dots, c_r$ of the ogive and to draw a graph (see \autoref{fig:ogive}): <>= knots(Fnt) Fnt(knots(Fnt)) plot(Fnt) @ \begin{figure}[t] \centering <>= plot(Fnt) @ \caption{Ogive of a grouped data object} \label{fig:ogive} \end{figure} To add further symmetry between functions \code{hist} and \code{ogive}, the latter also accepts in argument a vector of individual data. It will call \code{grouped.data} and then compute the ogive. (Below, \code{y} is the individual data set of \autoref{ex:grouped.data-2}.) <>= (Fnt <- ogive(y)) knots(Fnt) @ \qed \end{example} A method of function \code{quantile} for grouped data objects returns linearly smoothed quantiles, that is, the inverse of the ogive evaluated at various points: <>= Fnt <- ogive(x) @ <>= quantile(x) Fnt(quantile(x)) @ Finally, a \code{summary} method for grouped data objects returns the quantiles and the mean, as is usual for individual data: <>= summary(x) @ \section{Data sets} \label{sec:data-sets} This is certainly not the most spectacular feature of \pkg{actuar}, but it remains useful for illustrations and examples: the package includes the individual dental claims and grouped dental claims data of \cite{LossModels4e}: <>= data("dental"); dental data("gdental"); gdental @ \section{Calculation of empirical moments} \label{sec:empirical-moments} The package provides two functions useful for estimation based on moments. First, function \code{emm} computes the $k$th empirical moment of a sample, whether in individual or grouped data form. 
For example, the following expressions compute the first three moments for individual and grouped data sets: <>= emm(dental, order = 1:3) emm(gdental, order = 1:3) @ Second, in the same spirit as \code{ecdf} and \code{ogive}, function \code{elev} returns a function to compute the empirical limited expected value --- or first limited moment --- of a sample for any limit. Again, there are methods for individual and grouped data (see \autoref{fig:elev} for the graphs): <>= lev <- elev(dental) lev(knots(lev)) plot(lev, type = "o", pch = 19) lev <- elev(gdental) lev(knots(lev)) plot(lev, type = "o", pch = 19) @ \begin{figure}[t] \centering <>= par(mfrow = c(1, 2)) plot(elev(dental), type = "o", pch = 19) plot(elev(gdental), type = "o", pch = 19) @ \caption{Empirical limited expected value function of an individual data object (left) and a grouped data object (right)} \label{fig:elev} \end{figure} \section{Minimum distance estimation} \label{sec:minimum-distance} Two methods are widely used by actuaries to fit models to data: maximum likelihood and minimum distance. The first technique applied to individual data is well covered by function \code{fitdistr} of the package \pkg{MASS} \citep{MASS}. The second technique minimizes a chosen distance function between theoretical and empirical distributions. Package \pkg{actuar} provides function \code{mde}, very similar in usage and inner working to \code{fitdistr}, to fit models according to any of the following three distance minimization methods. \begin{enumerate} \item The Cramér-von~Mises method (\code{CvM}) minimizes the squared difference between the theoretical cdf and the empirical cdf or ogive at their knots: \begin{equation} d(\theta) = \sum_{j = 1}^n w_j [F(x_j; \theta) - F_n(x_j; \theta)]^2 \end{equation} for individual data and \begin{equation} d(\theta) = \sum_{j = 1}^r w_j [F(c_j; \theta) - \tilde{F}_n(c_j; \theta)]^2 \end{equation} for grouped data. 
Here, $F(x)$ is the theoretical cdf of a parametric family, $F_n(x)$ is the empirical cdf, $\tilde{F}_n(x)$ is the ogive and $w_1 \geq 0, w_2 \geq 0, \dots$ are arbitrary weights (defaulting to $1$). \item The modified chi-square method (\code{chi-square}) applies to grouped data only and minimizes the squared difference between the expected and observed frequency within each group: \begin{equation} d(\theta) = \sum_{j = 1}^r w_j [n (F(c_j; \theta) - F(c_{j - 1}; \theta)) - n_j]^2, \end{equation} where $n = \sum_{j = 1}^r n_j$. By default, $w_j = n_j^{-1}$. \item The layer average severity method (\code{LAS}) applies to grouped data only and minimizes the squared difference between the theoretical and empirical limited expected value within each group: \begin{equation} d(\theta) = \sum_{j = 1}^r w_j [\LAS(c_{j - 1}, c_j; \theta) - \tilde{\LAS}_n(c_{j - 1}, c_j; \theta)]^2, \end{equation} where $\LAS(x, y) = \E{X \wedge y} - \E{X \wedge x}$, % $\tilde{\LAS}_n(x, y) = \tilde{E}_n[X \wedge y] - \tilde{E}_n[X \wedge x]$ and $\tilde{E}_n[X \wedge x]$ is the empirical limited expected value for grouped data. \end{enumerate} The arguments of \code{mde} are a data set, a function to compute $F(x)$ or $\E{X \wedge x}$, starting values for the optimization procedure and the name of the method to use. The empirical functions are computed with \code{ecdf}, \code{ogive} or \code{elev}. \begin{example} \label{ex:mde} The expressions below fit an exponential distribution to the grouped dental data set, as per example~2.21 of \cite{LossModels}: <>= op <- options(warn = -1) # hide warnings from mde() @ <>= mde(gdental, pexp, start = list(rate = 1/200), measure = "CvM") mde(gdental, pexp, start = list(rate = 1/200), measure = "chi-square") mde(gdental, levexp, start = list(rate = 1/200), measure = "LAS") @ <>= options(op) # restore warnings @ \qed \end{example} It should be noted that optimization is not always as simple to achieve as in \autoref{ex:mde}. 
For example, consider the problem of fitting a Pareto distribution to the same data set using the Cramér--von~Mises method: <>= mde(gdental, ppareto, start = list(shape = 3, scale = 600), measure = "CvM") @ <>= out <- try(mde(gdental, ppareto, start = list(shape = 3, scale = 600), measure = "CvM"), silent = TRUE) cat(sub(", scale", ",\n scale", out)) @ Working in the log of the parameters often solves the problem since the optimization routine can then flawlessly work with negative parameter values: <>= pparetolog <- function(x, logshape, logscale) ppareto(x, exp(logshape), exp(logscale)) (p <- mde(gdental, pparetolog, start = list(logshape = log(3), logscale = log(600)), measure = "CvM")) @ The actual estimators of the parameters are obtained with <>= exp(p$estimate) @ %$ This procedure may introduce additional bias in the estimators, though. \section{Coverage modifications} \label{sec:coverage} Let $X$ be the random variable of the actual claim amount for an insurance policy, $Y^L$ be the random variable of the amount paid per loss and $Y^P$ be the random variable of the amount paid per payment. The terminology for the last two random variables refers to whether or not the insurer knows that a loss occurred. Now, the random variables $X$, $Y^L$ and $Y^P$ will differ if any of the following coverage modifications are present for the policy: an ordinary or a franchise deductible, a limit, coinsurance or inflation adjustment \cite[see][chapter~8 for precise definitions of these terms]{LossModels4e}. \autoref{tab:coverage} summarizes the definitions of $Y^L$ and $Y^P$. 
\begin{table} \centering \begin{tabular}{lll} \toprule Coverage modification & Per-loss variable ($Y^L$) & Per-payment variable ($Y^P$)\\ \midrule Ordinary deductible ($d$) & $\begin{cases} 0, & X \leq d \\ X - d, & X > d \end{cases}$ & $\begin{cases} X - d, & X > d \end{cases}$ \medskip \\ Franchise deductible ($d$) & $\begin{cases} 0, & X \leq d \\ X, & X > d \end{cases}$ & $\begin{cases} X, & X > d \end{cases} $ \medskip \\ Limit ($u$) & $\begin{cases} X, & X \leq u \\ u, & X > u \end{cases}$ & $\begin{cases} X, & X \leq u \\ u, & X > u \end{cases}$ \bigskip \\ Coinsurance ($\alpha$) & $\alpha X$ & $\alpha X$ \medskip \\ Inflation ($r$) & $(1 + r)X$ & $(1 + r)X$ \\ \bottomrule \end{tabular} \caption{Coverage modifications for per-loss variable ($Y^L$) and per-payment variable ($Y^P$) as defined in \cite{LossModels4e}.} \label{tab:coverage} \end{table} Often, one will want to use data $Y^P_1, \dots, Y^P_n$ (or $Y^L_1, \dots, Y^L_n$) from the random variable $Y^P$ ($Y^L$) to fit a model on the unobservable random variable $X$. This requires expressing the pdf or cdf of $Y^P$ ($Y^L$) in terms of the pdf or cdf of $X$. Function \code{coverage} of \pkg{actuar} does just that: given a pdf or cdf and any combination of the coverage modifications mentioned above, \code{coverage} returns a function object to compute the pdf or cdf of the modified random variable. The function can then be used in modeling like any other \code{dfoo} or \code{pfoo} function. \begin{example} \label{ex:coverage} Let $Y^P$ represent the amount paid by an insurer for a policy with an ordinary deductible $d$ and a limit $u - d$ (or maximum covered loss of $u$). 
Then the definition of $Y^P$ is \begin{equation} Y^P = \begin{cases} X - d, & d \leq X \leq u \\ u - d, & X \geq u \end{cases} \end{equation} and its pdf is \begin{equation} \label{eq:pdf-YP} f_{Y^P}(y) = \begin{cases} 0, & y = 0 \\ \dfrac{f_X(y + d)}{1 - F_X(d)}, & 0 < y < u - d \\ \dfrac{1 - F_X(u)}{1 - F_X(d)}, & y = u - d \\ 0, & y > u - d. \end{cases} \end{equation} Assume $X$ has a gamma distribution. Then an R function to compute the pdf \eqref{eq:pdf-YP} in any $y$ for a deductible $d = 1$ and a limit $u = 10$ is obtained with \code{coverage} as follows: <>= f <- coverage(pdf = dgamma, cdf = pgamma, deductible = 1, limit = 10) f f(0, shape = 5, rate = 1) f(5, shape = 5, rate = 1) f(9, shape = 5, rate = 1) f(12, shape = 5, rate = 1) @ \qed \end{example} Note how function \code{f} in the previous example is built specifically for the coverage modifications submitted and contains as little useless code as possible. The function returned by \code{coverage} may be used for various purposes, most notably parameter estimation, as the following example illustrates. \begin{example} Let object \code{y} contain a sample of claims amounts from policies with the deductible and limit of \autoref{ex:coverage}. One can fit a gamma distribution by maximum likelihood to the claim severity distribution as follows: <>= x <- rgamma(100, 2, 0.5) y <- pmin(x[x > 1], 9) op <- options(warn = -1) # hide warnings from fitdistr() @ <>= library(MASS) fitdistr(y, f, start = list(shape = 2, rate = 0.5)) @ <>= options(op) # restore warnings @ \qed \end{example} Vignette \code{"coverage"} contains more detailed formulas for the pdf and the cdf under various combinations of coverage modifications. 
\bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/data/0000755000176200001440000000000015147745722012460 5ustar liggesusersactuar/data/hachemeister.rda0000644000176200001440000000111215147745722015604 0ustar liggesusers]=hSQOVӠk7D4mE2bjr*t(šITM8 (88888:(tppss7O9^)C0{apzDX9" gѬ{pI8]X.B/)O瘰|ě]@'VS~kɏ?!ou3O|_xy\q|ߘ:?7s̼[g_,S:ͲpݛgT$EP~-n7F8M|}M?+3wi|G%,kP2 sϓm%\Z;*~[|_ŹZ aSߞƷļ_Su_my|_.oJb]q57ثv^Juc P&tuF{݅8eܖ}LزߖeҖ!ڈ::h/w-k~wtO @GԮactuar/data/gdental.rda0000644000176200001440000000130115147745722014561 0ustar liggesusersVn@^ۉWH Xmjxġr쐐q"yȁYN8 @Xxyfw/?yttnCA8RoNv#9wYis5i8Xő.-σ`o3)Z#pC}kh[@kYu)V@(B2r=(!r27,l!pt(Rxz~'!kW{l?16#o*UdG3)įj28Dg[F:m?I`lCo T&Ue>I-@n\ ZXk&Z':YI"Rc>k%=*O{S3GEEXmB{ML)_[]Y+Z-8Y{?WH=*i fza624]w ga۵ vY _հ ;O/1-"O$Դ Ѕ ( a3(9އSe*_VG r.&/P`בȦ. ^< 5 Y=L5M(WrSj+qf^ actuar/data/dental.rda0000644000176200001440000000014515147745722014417 0ustar liggesusers r0b```b`bfb H020piԼ 0v.P.(p(ρQ t(?B@actuar/src/0000755000176200001440000000000015151412457012325 5ustar liggesusersactuar/src/actuar.h0000644000176200001440000003103715147745722013772 0ustar liggesusers#include /*Error messages */ #define R_MSG_NA _("NaNs produced") /* Interfaces to routines from package expint */ extern double(*actuar_gamma_inc)(double,double); /* Functions accessed from .External() */ SEXP actuar_do_dpq(SEXP); SEXP actuar_do_dpq0(int, SEXP); SEXP actuar_do_dpq1(int, SEXP); SEXP actuar_do_dpq2(int, SEXP); SEXP actuar_do_dpq3(int, SEXP); SEXP actuar_do_dpq4(int, SEXP); SEXP actuar_do_dpq5(int, SEXP); SEXP actuar_do_dpq6(int, SEXP); SEXP actuar_do_random(SEXP); SEXP actuar_do_random1(int, SEXP, SEXPTYPE); SEXP actuar_do_random2(int, SEXP, SEXPTYPE); SEXP actuar_do_random3(int, SEXP, SEXPTYPE); SEXP actuar_do_random4(int, SEXP, SEXPTYPE); SEXP actuar_do_random5(int, SEXP, SEXPTYPE); SEXP actuar_do_dpqphtype(SEXP); SEXP actuar_do_dpqphtype2(int, SEXP); SEXP actuar_do_randomphtype(SEXP); SEXP 
actuar_do_randomphtype2(int, SEXP, SEXPTYPE); SEXP actuar_do_betaint(SEXP); SEXP actuar_do_hierarc(SEXP); SEXP actuar_do_panjer(SEXP); /* Utility functions */ /* Matrix algebra */ void actuar_expm(double *, int, double *); double actuar_expmprod(double *, double *, double *, int); void actuar_matpow(double *, int, int, double *); void actuar_solve(double *, double *, int, int, double *); /* Special integrals */ double betaint(double, double, double); double betaint_raw(double, double, double, double); /* Sampling */ int SampleSingleValue(int, double *); /* One parameter distributions */ double mexp(double, double, int); double levexp(double, double, double, int); double mgfexp(double, double, int); double dinvexp(double, double, int); double pinvexp(double, double, int, int); double qinvexp(double, double, int, int); double rinvexp(double); double minvexp(double, double, int); double levinvexp(double, double, double, int); double dlogarithmic(double, double, int); double plogarithmic(double, double, int, int); double qlogarithmic(double, double, int, int); double rlogarithmic(double); double dztpois(double, double, int); double pztpois(double, double, int, int); double qztpois(double, double, int, int); double rztpois(double); double dztgeom(double, double, int); double pztgeom(double, double, int, int); double qztgeom(double, double, int, int); double rztgeom(double); /* Two parameter distributions */ double munif(double, double, double, int); double levunif(double, double, double, double, int); double mgfunif(double, double, double, int); double mnorm(double, double, double, int); double mgfnorm(double, double, double, int); double mbeta(double, double, double, int); double levbeta(double, double, double, double, int); double mgamma(double, double, double, int); double levgamma(double, double, double, double, int); double mgfgamma(double, double, double, int); double mchisq(double, double, double, int); double levchisq(double, double, double, double, int); double 
mgfchisq(double, double, double, int); double dinvgamma(double, double, double, int); double pinvgamma(double, double, double, int, int); double qinvgamma(double, double, double, int, int); double rinvgamma(double, double); double minvgamma(double, double, double, int); double levinvgamma(double, double, double, double, int); double mgfinvgamma(double, double, double, int); double dinvparalogis(double, double, double, int); double pinvparalogis(double, double, double, int, int); double qinvparalogis(double, double, double, int, int); double rinvparalogis(double, double); double minvparalogis(double, double, double, int); double levinvparalogis(double, double, double, double, int); double dinvpareto(double, double, double, int); double pinvpareto(double, double, double, int, int); double qinvpareto(double, double, double, int, int); double rinvpareto(double, double); double minvpareto(double, double, double, int); double levinvpareto(double, double, double, double, int); double dinvweibull(double, double, double, int); double pinvweibull(double, double, double, int, int); double qinvweibull(double, double, double, int, int); double rinvweibull(double, double); double minvweibull(double, double, double, int); double levinvweibull(double, double, double, double, int); double dlgamma(double, double, double, int); double plgamma(double, double, double, int, int); double qlgamma(double, double, double, int, int); double rlgamma(double, double); double mlgamma(double, double, double, int); double levlgamma(double, double, double, double, int); double dllogis(double, double, double, int); double pllogis(double, double, double, int, int); double qllogis(double, double, double, int, int); double rllogis(double, double); double mllogis(double, double, double, int); double levllogis(double, double, double, double, int); double mlnorm(double, double, double, int); double levlnorm(double, double, double, double, int); double dparalogis(double, double, double, int); double 
pparalogis(double, double, double, int, int); double qparalogis(double, double, double, int, int); double rparalogis(double, double); double mparalogis(double, double, double, int); double levparalogis(double, double, double, double, int); double dpareto(double, double, double, int); double ppareto(double, double, double, int, int); double qpareto(double, double, double, int, int); double rpareto(double, double); double mpareto(double, double, double, int); double levpareto(double, double, double, double, int); double dpareto1(double, double, double, int); double ppareto1(double, double, double, int, int); double qpareto1(double, double, double, int, int); double rpareto1(double, double); double mpareto1(double, double, double, int); double levpareto1(double, double, double, double, int); double mweibull(double, double, double, int); double levweibull(double, double, double, double, int); double dgumbel(double, double, double, int); double pgumbel(double, double, double, int, int); double qgumbel(double, double, double, int, int); double rgumbel(double, double); double mgumbel(double, double, double, int); double mgfgumbel(double, double, double, int); double dinvgauss(double, double, double, int); double pinvgauss(double, double, double, int, int); double qinvgauss(double, double, double, int, int, double, int, int); double rinvgauss(double, double); double minvgauss(double, double, double, int); double levinvgauss(double, double, double, double, int); double mgfinvgauss(double, double, double, int); double dztnbinom(double, double, double, int); double pztnbinom(double, double, double, int, int); double qztnbinom(double, double, double, int, int); double rztnbinom(double, double); double dztbinom(double, double, double, int); double pztbinom(double, double, double, int, int); double qztbinom(double, double, double, int, int); double rztbinom(double, double); double dzmlogarithmic(double, double, double, int); double pzmlogarithmic(double, double, double, int, 
int); double qzmlogarithmic(double, double, double, int, int); double rzmlogarithmic(double, double); double dzmpois(double, double, double, int); double pzmpois(double, double, double, int, int); double qzmpois(double, double, double, int, int); double rzmpois(double, double); double dzmgeom(double, double, double, int); double pzmgeom(double, double, double, int, int); double qzmgeom(double, double, double, int, int); double rzmgeom(double, double); double dpoisinvgauss(double, double, double, int); double ppoisinvgauss(double, double, double, int, int); double qpoisinvgauss(double, double, double, int, int); double rpoisinvgauss(double, double); /* Three parameter distributions */ double dburr(double, double, double, double, int); double pburr(double, double, double, double, int, int); double qburr(double, double, double, double, int, int); double rburr(double, double, double); double mburr(double, double, double, double, int); double levburr(double, double, double, double, double, int); double dgenpareto(double, double, double, double, int); double pgenpareto(double, double, double, double, int, int); double qgenpareto(double, double, double, double, int, int); double rgenpareto(double, double, double); double mgenpareto(double, double, double, double, int); double levgenpareto(double, double, double, double, double, int); double dinvburr(double, double, double, double, int); double pinvburr(double, double, double, double, int, int); double qinvburr(double, double, double, double, int, int); double rinvburr(double, double, double); double minvburr(double, double, double, double, int); double levinvburr(double, double, double, double, double, int); double dinvtrgamma(double, double, double, double, int); double pinvtrgamma(double, double, double, double, int, int); double qinvtrgamma(double, double, double, double, int, int); double rinvtrgamma(double, double, double); double minvtrgamma(double, double, double, double, int); double levinvtrgamma(double, double, 
double, double, double, int); double dtrgamma(double, double, double, double, int); double ptrgamma(double, double, double, double, int, int); double qtrgamma(double, double, double, double, int, int); double rtrgamma(double, double, double); double mtrgamma(double, double, double, double, int); double levtrgamma(double, double, double, double, double, int); double dpareto2(double, double, double, double, int); double ppareto2(double, double, double, double, int, int); double qpareto2(double, double, double, double, int, int); double rpareto2(double, double, double); double mpareto2(double, double, double, double, int); double levpareto2(double, double, double, double, double, int); double dpareto3(double, double, double, double, int); double ppareto3(double, double, double, double, int, int); double qpareto3(double, double, double, double, int, int); double rpareto3(double, double, double); double mpareto3(double, double, double, double, int); double levpareto3(double, double, double, double, double, int); double dzmnbinom(double, double, double, double, int); double pzmnbinom(double, double, double, double, int, int); double qzmnbinom(double, double, double, double, int, int); double rzmnbinom(double, double, double); double dzmbinom(double, double, double, double, int); double pzmbinom(double, double, double, double, int, int); double qzmbinom(double, double, double, double, int, int); double rzmbinom(double, double, double); /* Four parameter distributions */ double dgenbeta(double, double, double, double, double, int); double pgenbeta(double, double, double, double, double, int, int); double qgenbeta(double, double, double, double, double, int, int); double rgenbeta(double, double, double, double); double mgenbeta(double, double, double, double, double, int); double levgenbeta(double, double, double, double, double, double, int); double dtrbeta(double, double, double, double, double, int); double ptrbeta(double, double, double, double, double, int, int); 
double qtrbeta(double, double, double, double, double, int, int); double rtrbeta(double, double, double, double); double mtrbeta(double, double, double, double, double, int); double levtrbeta(double, double, double, double, double, double, int); double dpareto4(double, double, double, double, double, int); double ppareto4(double, double, double, double, double, int, int); double qpareto4(double, double, double, double, double, int, int); double rpareto4(double, double, double, double); double mpareto4(double, double, double, double, double, int); double levpareto4(double, double, double, double, double, double, int); /* Five parameter distributions */ double dfpareto(double, double, double, double, double, double, int); double pfpareto(double, double, double, double, double, double, int, int); double qfpareto(double, double, double, double, double, double, int, int); double rfpareto(double, double, double, double, double); double mfpareto(double, double, double, double, double, double, int); double levfpareto(double, double, double, double, double, double, double, int); /* Phase-type distributions */ double dphtype(double, double *, double *, int, int); double pphtype(double, double *, double *, int, int, int); double rphtype(double *, double **, double *, int); double mphtype(double, double *, double *, int, int); double mgfphtype(double, double *, double *, int, int); /* Definitions for the tables linking the first group of functions to * the second one. Tables found in names.c. One table for * {d,p,q,m,lev} functions and one for the {r} functions since we * need one more argument: the type of the result. 
*/ typedef struct { char *name; SEXP (*cfun)(int, SEXP); int code; } dpq_tab_struct; extern dpq_tab_struct dpq_tab[]; typedef struct { char *name; SEXP (*cfun)(int, SEXP, SEXPTYPE); int code; SEXPTYPE type; } random_tab_struct; extern random_tab_struct random_tab[]; actuar/src/pareto4.c0000644000176200001440000001433315147745722014064 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Pareto (type) IV distribution. See ../R/Pareto4.R for * details. * * We work with the density expressed as * * shape1 * shape2 * u^shape1 * (1 - u) / (x - min) * * with u = 1/(1 + v), v = ((x - min)/scale)^shape2. * * AUTHORS: Nicholas Langevin and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dpareto4(double x, double min, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return x + min + shape1 + shape2 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < min) return ACT_D__0; /* handle (x - min) == 0 separately */ if (x == min) { if (shape2 < 1) return R_PosInf; if (shape2 > 1) return ACT_D__0; /* else */ return ACT_D_val(shape1 / scale); } double logv, logu, log1mu; logv = shape2 * (log(x - min) - log(scale)); logu = - log1pexp(logv); log1mu = - log1pexp(-logv); return ACT_D_exp(log(shape1) + log(shape2) + shape1 * logu + log1mu - log(x - min)); } double ppareto4(double q, double min, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return q + min + shape1 + shape2 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || 
!R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (q <= min) return ACT_DT_0; double u = exp(-log1pexp(shape2 * (log(q - min) - log(scale)))); return ACT_DT_Cval(R_pow(u, shape1)); } double qpareto4(double p, double min, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return p + min + shape1 + shape2 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, min, R_PosInf); p = ACT_D_qIv(p); return min + scale * R_pow(R_pow(ACT_D_Cval(p), -1.0/shape1) - 1.0, 1.0/shape2); } double rpareto4(double min, double shape1, double shape2, double scale) { if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; return min + scale * R_pow(R_pow(unif_rand(), -1.0/shape1) - 1.0, 1.0/shape2); } double mpareto4(double order, double min, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return order + min + shape1 + shape2 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; /* The case min = 0 is a Burr with a larger range of admissible * values for order: - shape2 < order < shape1 * shape2. */ if (min == 0.0) return mburr(order, shape1, shape2, scale, give_log); /* From now on min != 0 and order must be a stricly non negative * integer < shape1 * shape2. 
*/ if (order < 0.0) return R_NaN; if (order >= shape1 * shape2) return R_PosInf; int i; double order0 = order; double tmp, sum, r = scale/min; double Ga = gammafn(shape1); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = Ga; /* first term in the sum */ for (i = 1; i <= order; i++) { tmp = i/shape2; sum += choose(order, i) * R_pow(r, i) * gammafn(1.0 + tmp) * gammafn(shape1 - tmp); } /* The first term of the sum is always min^order. */ return R_pow(min, order) * sum / Ga; } double levpareto4(double limit, double min, double shape1, double shape2, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale) || ISNAN(order)) return limit + min + shape1 + shape2 + scale + order; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (limit <= min) return 0.0; /* The case min = 0 is a Burr with a larger range of admissible * values for order: order > - shape2. */ if (min == 0.0) return levburr(limit, shape1, shape2, scale, order, give_log); /* From now on min != 0 and order must be a stricly non negative * integer. 
*/ if (order < 0.0) return R_NaN; int i; double order0 = order; double logv, u, u1m; double tmp, sum, r = scale / min; logv = shape2 * (log(limit - min) - log(scale)); u = exp(-log1pexp(logv)); u1m = exp(-log1pexp(-logv)); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = betaint_raw(u1m, 1.0, shape1, u); /* first term in the sum */ for (i = 1; i <= order; i++) { tmp = i / shape2; sum += choose(order, i) * R_pow(r, i) * betaint_raw(u1m, 1.0 + tmp, shape1 - tmp, u); } return R_pow(min, order) * sum / gammafn(shape1) + ACT_DLIM__0(limit, order) * R_pow(u, shape1); } actuar/src/invparalogis.c0000644000176200001440000000754215147745722015210 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the inverse paralogistic distribution. See ../R/InverseParalogistic.R * for details. * * We work with the density expressed as * * shape^2 * u^shape * (1 - u) / x * * with u = v/(1 + v) = 1/(1 + 1/v), v = (x/scale)^shape. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvparalogis(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN;; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape < 1.0) return R_PosInf; if (shape > 1.0) return ACT_D__0; /* else */ return ACT_D_val(1.0/scale); } double logv, logu, log1mu; logv = shape * (log(x) - log(scale)); logu = - log1pexp(-logv); log1mu = - log1pexp(logv); return ACT_D_exp(2.0 * log(shape) + shape * logu + log1mu - log(x)); } double pinvparalogis(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN;; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(shape * (log(scale) - log(q)))); return ACT_DT_val(R_pow(u, shape)); } double qinvparalogis(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); double tmp = -1.0/shape; return scale * R_pow(R_pow(ACT_D_Lval(p), tmp) - 1.0, tmp); } double rinvparalogis(double shape, double scale) { double tmp; if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN;; tmp = -1.0/shape; return scale * R_pow(R_pow(unif_rand(), tmp) - 1.0, tmp); } double minvparalogis(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(shape) || 
!R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= - shape * shape || order >= shape) return R_PosInf; double tmp = order / shape; return R_pow(scale, order) * gammafn(shape + tmp) * gammafn(1.0 - tmp) / gammafn(shape); } double levinvparalogis(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape * shape) return R_PosInf; double logv, u, u1m; double tmp = order / shape; logv = shape * (log(limit) - log(scale)); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); return R_pow(scale, order) * betaint_raw(u, shape + tmp, 1.0 - tmp, u1m) / gammafn(shape) + ACT_DLIM__0(limit, order) * (0.5 - R_pow(u, shape) + 0.5); } actuar/src/llogis.c0000644000176200001440000000713015147745722013774 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the loglogistic distribution. See ../R/Loglogistic.R for details. * * We work with the density expressed as * * shape * u * (1 - u) / x * * with u = v/(1 + v) = 1/(1 + 1/v), v = (x/scale)^shape. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dllogis(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape < 1) return R_PosInf; if (shape > 1) return ACT_D__0; /* else */ return ACT_D_val(1.0/scale); } double logv, logu, log1mu; logv = shape * (log(x) - log(scale)); logu = - log1pexp(-logv); log1mu = - log1pexp(logv); return ACT_D_exp(log(shape) + logu + log1mu - log(x)); } double pllogis(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(shape * (log(scale) - log(q)))); return ACT_DT_val(u); } double qllogis(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(1.0/ACT_D_Cval(p) - 1.0, 1.0/shape); } double rllogis(double shape, double scale) { if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; return scale * R_pow(1.0/unif_rand() - 1.0, 1.0/shape); } double mllogis(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape || order >= shape) return R_PosInf; 
double tmp = order / shape; return R_pow(scale, order) * gammafn(1.0 + tmp) * gammafn(1.0 - tmp); } double levllogis(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN;; if (order <= -shape) return R_PosInf; if (limit <= 0.0) return 0; double logv, u, u1m; double tmp = order / shape; logv = shape * (log(limit) - log(scale)); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); return R_pow(scale, order) * betaint_raw(u, 1.0 + tmp, 1.0 - tmp, u1m) + ACT_DLIM__0(limit, order) * (0.5 - u + 0.5); } actuar/src/zmnbinom.c0000644000176200001440000001347015151206331014317 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-modified negative binomial distribution. See * ../R/ZeroModifiedNegativeBinomial.R for details. * * Let X ~ NegativeBinomial(size, prob). The probability mass function of the * zero-modified Negative Binomial random variable Z is * * Pr[Z = 0] = p0m * Pr[Z = x] = (1 - p0m) * Pr[X = x]/(1 - prob^size), x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = 1 - (1 - p0m) * (1 - Pr[X <= x])/(1 - prob^size). * * Limiting cases: * * 1. size == 0 is Zero Modified Logarithmic(1 - prob) (according to * the standard parametrization of the logarithmic distribution * used by {d,p,q,r}logarithmic(); * 2. prob == 1 has mass (1 - p0) at x = 1. 
* * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dzmnbinom(double x, double size, double prob, double p0m, int give_log) { /* We compute Pr[X = 0] with dbinom_raw() [as would eventually * dnbinom()] to take advantage of all the optimizations for * small/large values of 'prob' and 'size' (and also to skip some * validity tests). */ #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob) || ISNAN(p0m)) return x + size + prob + p0m; #endif if (prob <= 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0 || !R_FINITE(x)) return ACT_D__0; if (x == 0) return ACT_D_val(p0m); /* NOTE: from now on x > 0 */ /* limiting case as size -> 0 is zero modified logarithmic */ if (size == 0) return dzmlogarithmic(x, 1 - prob, p0m, give_log); /* limiting case as prob -> 1 is mass (1 - p0m) at one */ if (prob == 1) return (x == 1) ? ACT_D_Clog(p0m) : ACT_D__0; double lp0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/1); return ACT_D_val((1 - p0m) * dnbinom(x, size, prob, /*give_log*/0) / (-expm1(lp0))); } double pzmnbinom(double x, double size, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob) || ISNAN(p0m)) return x + size + prob + p0m; #endif if (prob <= 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; if (x < 1) return ACT_DT_val(p0m); /* NOTE: from now on x >= 1 */ /* simple case for all x >= 1 */ if (p0m == 1) return ACT_DT_1; /* limiting case as size -> 0 is zero modified logarithmic */ if (size == 0) return pzmlogarithmic(x, 1 - prob, p0m, lower_tail, log_p); /* limiting case as prob -> 1 is mass (1 - p0m) at one */ if (prob == 1) return ACT_DT_1; double lp0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/1); /* working in log scale improves accuracy */ return ACT_DT_CEval(log1p(-p0m) + pnbinom(x, size, prob, /*l._t.*/0, 
/*log_p*/1) - log1mexp(-lp0)); } double qzmnbinom(double p, double size, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(size) || ISNAN(prob) || ISNAN(p0m)) return p + size + prob + p0m; #endif if (prob <= 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; /* limiting case as size -> 0 is zero modified logarithmic */ if (size == 0) return qzmlogarithmic(p, 1 - prob, p0m, lower_tail, log_p); ACT_Q_P01_check(p); if (p0m == 1) return 0.0; /* limiting case as prob -> 1 is mass (1 - p0m) at one */ if (prob == 1) return (ACT_DT_qIv(p) <= p0m) ? ACT_Q_p0lim(p0m) : 1.0; if (p == ACT_DT_0) return ACT_Q_p0lim(p0m); if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); /* at this point 0 < p < 1, so p0m = 0 is not an issue */ /* working in log scale improves accuracy */ double lp0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/1); return qnbinom(-expm1(log1mexp(-lp0) - log1p(-p0m) + log1p(-p)), size, prob, /*l._t.*/1, /*log_p*/0); } /* ALGORITHM FOR GENERATION OF RANDOM VARIATES * * 1. p0m >= p0: just simulate variates from the discrete mixture. * * 2. p0m < p0: fastest method depends on the difference p0 - p0m. * * 2.1 p0 - p0m < ACT_DIFFMAX_REJECTION: rejection method with an * envelope that differs from the target distribution at zero * only. In other words: rejection only at zero. * 2.2 p0 - p0m >= ACT_DIFFMAX_REJECTION: simulate variates from * discrete mixture with the corresponding zero truncated * distribution. * * The threshold ACT_DIFFMAX_REJECTION is distribution specific. */ #define ACT_DIFFMAX_REJECTION 0.6 double rzmnbinom(double size, double prob, double p0m) { if (!R_FINITE(prob) || prob <= 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; /* limiting case as size -> 0 is zero modified logarithmic */ if (size == 0) return rzmlogarithmic(1 - prob, p0m); /* limiting case as prob -> 1 is mass (1 - p0m) at one */ if (prob == 1) return (unif_rand() <= p0m) ? 
0.0 : 1.0; double x, p0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/0); /* p0m >= p0: generate from mixture */ if (p0m >= p0) return (unif_rand() * (1 - p0) < (1 - p0m)) ? rnbinom(size, prob) : 0.0; /* p0m < p0: choice of algorithm depends on difference p0 - p0m */ if (p0 - p0m < ACT_DIFFMAX_REJECTION) { /* rejection method */ for (;;) { x = rnbinom(size, prob); if (x != 0 || /* x == 0 and */ runif(0, p0 * (1 - p0m)) <= (1 - p0) * p0m) return x; } } else { /* generate from zero truncated mixture */ return (unif_rand() <= p0m) ? 0.0 : qnbinom(runif(p0, 1), size, prob, /*l._t.*/1, /*log_p*/0); } } actuar/src/zmpois.c0000644000176200001440000001065415151206331014010 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-modified Poisson distribution. See ../R/ZeroModifiedPoisson.R * for details. * * Let X ~ Poisson(lambda). The probability mass function of the * zero-modified Poisson random variable Z is * * Pr[Z = 0] = p0m * Pr[Z = x] = (1 - p0m) * Pr[X = x]/(1 - exp(-lambda)), x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = 1 - (1 - p0m) * (1 - Pr[X <= x])/(1 - exp(-lambda)). * * Limiting case: lambda == 0 has mass (1 - p0m) at x = 1. * * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dzmpois(double x, double lambda, double p0m, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(lambda) || ISNAN(p0m)) return x + lambda + p0m; #endif if (lambda < 0 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0 || !R_FINITE(x)) return ACT_D__0; if (x == 0) return ACT_D_val(p0m); /* NOTE: from now on x > 0 */ /* simple case for all x > 0 */ if (p0m == 1) return ACT_D__0; /* for all x > 0 */ /* limiting case as lambda -> 0 is mass (1 - p0m) at one */ if (lambda == 0) return (x == 1) ? 
ACT_D_Clog(p0m) : ACT_D__0; return ACT_D_exp(dpois(x, lambda, /*give_log*/1) + log1p(-p0m) - log1mexp(lambda)); } double pzmpois(double x, double lambda, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(lambda) || ISNAN(p0m)) return x + lambda + p0m; #endif if (lambda < 0 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; if (x < 1) return ACT_DT_val(p0m); /* NOTE: from now on x >= 1 */ /* simple case for all x >= 1 */ if (p0m == 1) return ACT_DT_1; /* limiting case as lambda -> 0 is mass (1 - p0m) at one */ if (lambda == 0) return ACT_DT_1; /* working in log scale improves accuracy */ return ACT_DT_CEval(log1p(-p0m) + ppois(x, lambda, /*l._t.*/0, /*log_p*/1) - log1mexp(lambda)); } double qzmpois(double p, double lambda, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(lambda) || ISNAN(p0m)) return p + lambda + p0m; #endif if (lambda < 0 || !R_FINITE(lambda) || p0m < 0 || p0m > 1) return R_NaN; ACT_Q_P01_check(p); if (p0m == 1) return 0.0; /* limiting case as lambda -> 0 is mass (1 - p0m) at one */ if (lambda == 0) return (ACT_DT_qIv(p) <= p0m) ? ACT_Q_p0lim(p0m) : 1.0; if (p == ACT_DT_0) return ACT_Q_p0lim(p0m); if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); /* at this point 0 < p < 1, so p0m = 0 is not an issue */ /* working in log scale improves accuracy */ return (p <= p0m) ? 0.0 : qpois(-expm1(log1mexp(lambda) - log1p(-p0m) + log1p(-p)), lambda, /*l._t.*/1, /*log_p*/0); } /* ALGORITHM FOR GENERATION OF RANDOM VARIATES * * 1. p0m >= p0: just simulate variates from the discrete mixture. * * 2. p0m < p0: fastest method depends on the difference p0 - p0m. * * 2.1 p0 - p0m < ACT_DIFFMAX_REJECTION: rejection method with an * envelope that differs from the target distribution at zero * only. In other words: rejection only at zero. 
* 2.2 p0 - p0m >= ACT_DIFFMAX_REJECTION: inverse method on a * restricted range --- same method as the corresponding zero * truncated distribution. * * The threshold ACT_DIFFMAX_REJECTION is distribution specific. */ #define ACT_DIFFMAX_REJECTION 0.95 double rzmpois(double lambda, double p0m) { if (lambda < 0 || !R_FINITE(lambda) || p0m < 0 || p0m > 1) return R_NaN; /* limiting case as lambda -> 0 is mass (1 - p0m) at one */ if (lambda == 0) return (unif_rand() <= p0m) ? 0.0 : 1.0; double x, p0 = exp(-lambda); /* p0m >= p0: generate from mixture */ if (p0m >= p0) return (unif_rand() * (1 - p0) < (1 - p0m)) ? rpois(lambda) : 0.0; /* p0m < p0: choice of algorithm depends on difference p0 - p0m */ if (p0 - p0m < ACT_DIFFMAX_REJECTION) { /* rejection method */ for (;;) { x = rpois(lambda); if (x != 0 || /* x == 0 and */ runif(0, p0 * (1 - p0m)) <= (1 - p0) * p0m) return x; } } else { /* inversion method */ return qpois(runif((p0 - p0m)/(1 - p0m), 1), lambda, 1, 0); } } actuar/src/unif.c0000644000176200001440000000366715147745722013457 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to calculate raw and limited moments for the Uniform * distribution. See ../R/UniformSupp.R for details. 
* * AUTHORS: Christophe Dutang and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double munif(double order, double min, double max, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(min) || ISNAN(max)) return order + min + max; #endif if (!R_FINITE(min) || !R_FINITE(max) || min >= max) return R_NaN; if (order == -1.0) return (log(fabs(max)) - log(fabs(min))) / (max - min); double tmp = order + 1; return (R_pow(max, tmp) - R_pow(min, tmp)) / ((max - min) * tmp); } double levunif(double limit, double min, double max, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(min) || ISNAN(max) || ISNAN(order)) return limit + min + max + order; #endif if (!R_FINITE(min) || !R_FINITE(max) || min >= max) return R_NaN; if (limit <= min) return R_pow(limit, order); if (limit >= max) return munif(order, min, max, give_log); if (order == -1.0) return (log(fabs(limit)) - log(fabs(min))) / (max - min) + (max - limit) / (limit * (max - min)); double tmp = order + 1; return (R_pow(limit, tmp) - R_pow(min, tmp)) / ((max - min) * tmp) + R_pow(limit, order) * (max - limit) / (max - min); } double mgfunif(double t, double min, double max, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(min) || ISNAN(max)) return t + min + max; #endif if (!R_FINITE(min) || !R_FINITE(max) || min >= max) return R_NaN; if (t == 0.0) return ACT_D__1; double tmp1, tmp2; tmp1 = exp(t * max) - exp(t * min); tmp2 = t * (max - min); return ACT_D_exp(log(tmp1) - log(tmp2)); } actuar/src/actuar-win.def0000644000176200001440000000005215147745722015065 0ustar liggesusersLIBRARY actuar.dll EXPORTS R_init_actuar actuar/src/invpareto.c0000644000176200001440000001110115147745722014503 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Fonctions to compute density, cumulative distribution and quantile * fonctions, raw and limited moments and to simulate random variates * for the inverse Pareto distribution. 
See ../R/InversePareto.R for * details. * * We work with the density expressed as * * shape * u^shape * (1 - u) / x * * with u = v/(1 + v) = 1/(1 + 1/v), v = x/scale. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvpareto(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape < 1) return R_PosInf; if (shape > 1) return ACT_D__0; /* else */ return ACT_D_val(1.0/scale); } double tmp, logu, log1mu; tmp = log(x) - log(scale); logu = - log1pexp(-tmp); log1mu = - log1pexp(tmp); return ACT_D_exp(log(shape) + shape * logu + log1mu - log(x)); } double pinvpareto(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN;; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(log(scale) - log(q))); return ACT_DT_val(R_pow(u, shape)); } double qinvpareto(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale / (R_pow(ACT_D_Lval(p), -1.0/shape) - 1.0); } double rinvpareto(double shape, double scale) { if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN;; return scale / (R_pow(unif_rand(), -1.0/shape) - 1.0); } double minvpareto(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return 
order + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape || order >= 1.0) return R_PosInf; return R_pow(scale, order) * gammafn(shape + order) * gammafn(1.0 - order) / gammafn(shape); } /* The function to integrate in the limited moment */ static void fn(double *x, int n, void *ex) { int i; double *pars = (double *) ex, shape, order; shape = pars[0]; order = pars[1]; for(i = 0; i < n; i++) x[i] = R_pow(x[i], shape + order - 1) * R_pow(1 - x[i], -order); } double levinvpareto(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape) return R_PosInf; if (limit <= 0.0) return 0.0; double u; double ex[2], lower, upper, epsabs, epsrel, result, abserr, *work; int neval, ier, subdiv, lenw, last, *iwork; /* Parameters for the integral are pretty much fixed here */ ex[0] = shape; ex[1] = order; lower = 0.0; upper = limit / (limit + scale); subdiv = 100; epsabs = R_pow(DBL_EPSILON, 0.25); epsrel = epsabs; lenw = 4 * subdiv; /* as instructed in WRE */ iwork = (int *) R_alloc(subdiv, sizeof(int)); /* idem */ work = (double *) R_alloc(lenw, sizeof(double)); /* idem */ Rdqags(fn, (void *) &ex, &lower, &upper, &epsabs, &epsrel, &result, &abserr, &neval, &ier, &subdiv, &lenw, &last, iwork, work); if (ier == 0) { u = exp(-log1pexp(log(scale) - log(limit))); return R_pow(scale, order) * shape * result + ACT_DLIM__0(limit, order) * (0.5 - R_pow(u, shape) + 0.5); } else error(_("integration failed")); } actuar/src/trbeta.c0000644000176200001440000001304215147745722013763 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * 
functions, raw and limited moments and to simulate random variates * for the transformed beta distribution. See ../R/TransformedBeta.R for * details. * * We work with the density expressed as * * shape2 * u^shape3 * (1 - u)^shape1 / (x * beta(shape1, shape3)) * * with u = v/(1 + v) = 1/(1 + 1/v), v = (x/scale)^shape2. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dtrbeta(double x, double shape1, double shape2, double shape3, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return x + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape2 * shape3 < 1) return R_PosInf; if (shape2 * shape3 > 1) return ACT_D__0; /* else */ return give_log ? 
log(shape2) - log(scale) - lbeta(shape3, shape1) : shape2 / (scale * beta(shape3, shape1)); } double logv, logu, log1mu; logv = shape2 * (log(x) - log(scale)); logu = - log1pexp(-logv); log1mu = - log1pexp(logv); return ACT_D_exp(log(shape2) + shape3 * logu + shape1 * log1mu - log(x) - lbeta(shape3, shape1)); } double ptrbeta(double q, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return q + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double logvm, u; logvm = shape2 * (log(scale) - log(q)); /* -log v */ u = exp(-log1pexp(logvm)); if (u > 0.5) { /* Compute (1 - x) accurately */ double u1m = exp(-log1pexp(-logvm)); return pbeta(u1m, shape1, shape3, 1 - lower_tail, log_p); } /* else u <= 0.5 */ return pbeta(u, shape3, shape1, lower_tail, log_p); } double qtrbeta(double p, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return p + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(1.0/qbeta(p, shape3, shape1, lower_tail, 0) - 1.0, -1.0/shape2); } double rtrbeta(double shape1, double shape2, double shape3, double scale) { if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; return scale * R_pow(1.0/rbeta(shape3, shape1) - 1.0, -1.0/shape2); } double mtrbeta(double order, double shape1, 
double shape2, double shape3, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return order + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= - shape3 * shape2 || order >= shape1 * shape2) return R_PosInf; double tmp = order / shape2; return R_pow(scale, order) * beta(shape3 + tmp, shape1 - tmp) / beta(shape1, shape3); } double levtrbeta(double limit, double shape1, double shape2, double shape3, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + shape3 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= - shape3 * shape2) return R_PosInf; if (limit <= 0.0) return 0.0; double logv, u, u1m, Ix; double tmp = order / shape2; logv = shape2 * (log(limit) - log(scale)); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); Ix = (u > 0.5) ? pbeta(u1m, shape1, shape3, /*l._t.*/1, /*give_log*/0) : pbeta(u, shape3, shape1, /*l._t.*/0, /*give_log*/0); return R_pow(scale, order) * betaint_raw(u, shape3 + tmp, shape1 - tmp, u1m) / (gammafn(shape1) * gammafn(shape3)) + ACT_DLIM__0(limit, order) * Ix; } actuar/src/zmlogarithmic.c0000644000176200001440000000647315151206331015344 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-modified logarithmic distribution. See ../R/ZeroModifiedLogarithmic.R * for details. 
* * Let X ~ Logarithmic(prob). The probability mass function of the * zero-modified Logarithmic random variable Z is * * Pr[Z = 0] = p0m * Pr[Z = x] = (1 - p0m) * Pr[X = x], x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = 1 - (1 - p0m) * (1 - Pr[X <= x]). * * Limiting case: prob == 0 has mass (1 - p0m) at x = 1. * * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dzmlogarithmic(double x, double prob, double p0m, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob) || ISNAN(p0m)) return x + prob + p0m; #endif if (prob < 0 || prob >= 1 || p0m < 0 || p0m > 1) return R_NaN; ACT_D_nonint_check(x); if (!R_FINITE(x) || x < 0) return ACT_D__0; if (x == 0) return ACT_D_val(p0m); /* NOTE: from now on x > 0 */ /* limiting case as prob -> 0 is mass (1 - p0m) at one */ if (prob == 0) return (x == 1) ? ACT_D_Clog(p0m) : ACT_D__0; x = ACT_forceint(x); double a = -1.0/log1p(-prob); return ACT_D_exp(log(a) + x * log(prob) + log1p(-p0m) - log(x)); } double pzmlogarithmic(double x, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob) || ISNAN(p0m)) return x + prob + p0m; #endif if (prob < 0 || prob >= 1 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; if (x < 1) return ACT_DT_val(p0m); /* NOTE: from now on x >= 1 */ /* simple case for all x >= 1 */ if (p0m == 1) return ACT_DT_1; /* limiting case as prob -> 0 is mass (1 - p0m) at one. 
*/ if (prob == 0) return ACT_DT_1; return ACT_DT_Cval((1 - p0m) * plogarithmic(x, prob, /*l._t.*/0, /*log_p*/0)); } double qzmlogarithmic(double p, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(prob) || ISNAN(p0m)) return p + prob + p0m; #endif if (prob < 0 || prob >= 1 || p0m < 0 || p0m > 1) return R_NaN; ACT_Q_P01_check(p); if (p0m == 1) return 0.0; /* limiting case as prob -> 0 is mass (1 - p0m) at one. */ if (prob == 0) return (ACT_DT_qIv(p) <= p0m) ? ACT_Q_p0lim(p0m) : 1.0; if (p == ACT_DT_0) return ACT_Q_p0lim(p0m); if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); /* avoid rounding errors if p was given in log form */ if (log_p) p0m = exp(log(p0m)); /* avoid rounding errors if p was given as upper tail */ if (!lower_tail) p0m = 0.5 - (0.5 - p0m + 0.5) + 0.5; /* at this point 0 < p < 1, so p0m = 0 is not an issue */ return (p <= p0m) ? 0.0 : qlogarithmic((p - p0m)/(1 - p0m), prob, /*l._t.*/1, /*log_p*/0); } /* ALGORITHM FOR GENERATION OF RANDOM VARIATES * * Just simulate variates from the discrete mixture. * */ double rzmlogarithmic(double prob, double p0m) { if (prob < 0 || prob >= 1 || p0m < 0 || p0m > 1) return R_NaN; return (unif_rand() < p0m) ? 0.0 : rlogarithmic(prob); } actuar/src/pareto1.c0000644000176200001440000000567015147745722014065 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the single-parameter Pareto distribution. See * ../R/SingleParameterPareto.R for details. * * The density function is * * shape * min^shape / x^(shape + 1). 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dpareto1(double x, double shape, double min, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(min)) return x + shape + min; #endif if (!R_FINITE(shape) || !R_FINITE(min) || shape <= 0.0 || min <= 0.0) return R_NaN; if (!R_FINITE(x) || x < min) return ACT_D__0; return ACT_D_exp(log(shape) + shape * log(min) - (shape + 1.0) * log(x)); } double ppareto1(double q, double shape, double min, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(min)) return q + shape + min; #endif if (!R_FINITE(shape) || !R_FINITE(min) || shape <= 0.0 || min <= 0.0) return R_NaN; if (q <= min) return ACT_DT_0; return ACT_DT_Cval(R_pow(min / q, shape)); } double qpareto1(double p, double shape, double min, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(min)) return p + shape + min; #endif if (!R_FINITE(shape) || !R_FINITE(min) || shape <= 0.0 || min <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, min, R_PosInf); p = ACT_D_qIv(p); return min / R_pow(ACT_D_Cval(p), 1.0/shape); } double rpareto1(double shape, double min) { if (!R_FINITE(shape) || !R_FINITE(min) || shape <= 0.0 || min <= 0.0) return R_NaN; return min / R_pow(unif_rand(), 1.0/shape); } double mpareto1(double order, double shape, double min, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(min)) return order + shape + min; #endif if (!R_FINITE(shape) || !R_FINITE(min) || !R_FINITE(order) || shape <= 0.0 || min <= 0.0) return R_NaN; if (order >= shape) return R_PosInf; return shape * R_pow(min, order) / (shape - order); } double levpareto1(double limit, double shape, double min, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(min) || ISNAN(order)) return limit + shape + min + order; #endif if (!R_FINITE(shape) || !R_FINITE(min) || !R_FINITE(order) || shape <= 0.0 || min <= 
0.0) return R_NaN; if (limit <= min) return 0.0; double tmp = shape - order; return shape * R_pow(min, order) / tmp - order * R_pow(min, shape) / (tmp * R_pow(limit, tmp)); } actuar/src/invburr.c0000644000176200001440000001070315147745722014172 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the inverse Burr distribution. See ../R/InverseBurr.R for details. * * We work with the density expressed as * * shape1 * shape2 * u^shape1 * (1 - u) / x * * with u = v/(1 + v) = 1/(1 + 1/v), v = (x/scale)^shape2. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvburr(double x, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return x + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape1 * shape2 < 1) return R_PosInf; if (shape1 * shape2 > 1) return ACT_D__0; /* else */ return ACT_D_val(1.0/scale); } double logv, logu, log1mu; logv = shape2 * (log(x) - log(scale)); logu = - log1pexp(-logv); log1mu = - log1pexp(logv); return ACT_D_exp(log(shape1) + log(shape2) + shape1 * logu + log1mu - log(x)); } double pinvburr(double q, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return q + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(shape2 * (log(scale) - log(q)))); return ACT_DT_val(R_pow(u, 
shape1)); } double qinvburr(double p, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return p + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(R_pow(ACT_D_Lval(p), -1.0/shape1) - 1.0, -1.0/shape2); } double rinvburr(double shape1, double shape2, double scale) { if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; return scale * R_pow(R_pow(unif_rand(), -1.0/shape1) - 1.0, -1.0/shape2); } double minvburr(double order, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return order + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= - shape1 * shape2 || order >= shape2) return R_PosInf; double tmp = order / shape2; return R_pow(scale, order) * gammafn(shape1 + tmp) * gammafn(1.0 - tmp) / gammafn(shape1); } double levinvburr(double limit, double shape1, double shape2, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape1 * shape2) return R_PosInf; if (limit <= 0.0) return 0.0; double logv, u, u1m; double tmp = order / shape2; logv = shape2 * (log(limit) - log(scale)); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); return R_pow(scale, order) * betaint_raw(u, 
shape1 + tmp, 1.0 - tmp, u1m) / gammafn(shape1) + ACT_DLIM__0(limit, order) * (0.5 - R_pow(u, shape1) + 0.5); } actuar/src/fpareto.c0000644000176200001440000001664615147745722014157 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Feller-Pareto distribution. See ../R/FellerPareto.R for * details. * * We work with the density expressed as * * shape2 * u^shape3 * (1 - u)^shape1 / ((x - min) * beta(shape1, shape3)) * * with u = v/(1 + v) = 1/(1 + 1/v), v = ((x - min)/scale)^shape2. * * AUTHORS: Nicholas Langevin and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dfpareto(double x, double min, double shape1, double shape2, double shape3, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return x + min + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < min) return ACT_D__0; /* handle (x - min) == 0 separately */ if (x == min) { if (shape2 * shape3 < 1) return R_PosInf; if (shape2 * shape3 > 1) return ACT_D__0; /* else */ return give_log ? 
log(shape2) - log(scale) - lbeta(shape3, shape1) : shape2 / (scale * beta(shape3, shape1)); } double logv, logu, log1mu; logv = shape2 * (log(x - min) - log(scale)); logu = - log1pexp(-logv); log1mu = - log1pexp(logv); return ACT_D_exp(log(shape2) + shape3 * logu + shape1 * log1mu - log(x - min) - lbeta(shape3, shape1)); } double pfpareto(double q, double min, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return q + min + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (q <= min) return ACT_DT_0; double logvm, u; logvm = shape2 * (log(scale) - log(q - min)); /* -log v */ u = exp(-log1pexp(logvm)); if (u > 0.5) { /* Compute (1 - u) accurately */ double u1m = exp(-log1pexp(-logvm)); return pbeta(u1m, shape1, shape3, 1 - lower_tail, log_p); } /* else u <= 0.5 */ return pbeta(u, shape3, shape1, lower_tail, log_p); } double qfpareto(double p, double min, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return p + min + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, min, R_PosInf); p = ACT_D_qIv(p); return min + scale * R_pow(1.0 / qbeta(p, shape3, shape1, lower_tail, 0) - 1.0, -1.0/shape2); } double rfpareto(double min, double shape1, double shape2, double shape3, double scale) { if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || 
shape3 <= 0.0 || scale <= 0.0) return R_NaN; return min + scale * R_pow(1.0/rbeta(shape1, shape3) - 1.0, 1.0/shape2); } double mfpareto(double order, double min, double shape1, double shape2, double shape3, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return order + min + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; /* The case min = 0 is a Transformed Beta with a larger range of * admissible values for order: - shape3 * shape2 < order < * shape1 * shape2. */ if (min == 0.0) return mtrbeta(order, shape1, shape2, shape3, scale, give_log); /* From now on min != 0 and order must be a stricly non negative * integer < shape1 * shape2. */ if (order < 0.0) return R_NaN; if (order >= shape1 * shape2) return R_PosInf; int i; double order0 = order; double tmp, sum, r = scale/min; double Be = beta(shape1, shape3); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = Be; /* first term in the sum */ for (i = 1; i <= order; i++) { tmp = i/shape2; sum += choose(order, i) * R_pow(r, i) * beta(shape3 + tmp, shape1 - tmp); } return R_pow(min, order) * sum / Be; } double levfpareto(double limit, double min, double shape1, double shape2, double shape3, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(min) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale) || ISNAN(order)) return limit + min + shape1 + shape2 + shape3 + scale + order; #endif if (!R_FINITE(min) || !R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (limit <= 
min) return 0.0; /* The case min = 0 is a Transformed Beta with a larger range of * admissible values for order: order > - shape3 * shape2. */ if (min == 0.0) return levtrbeta(limit, shape1, shape2, shape3, scale, order, give_log); /* From now on min != 0 and order must be a stricly non negative * integer. */ if (order < 0.0) return R_NaN; int i; double order0 = order; double logv, u, u1m, Ix; double tmp, sum, r = scale / min; logv = shape2 * (log(limit - min) - log(scale)); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = betaint_raw(u, shape3, shape1, u1m); /* first term in the sum */ for (i = 1; i <= order; i++) { tmp = i / shape2; sum += choose(order, i) * R_pow(r, i) * betaint_raw(u, shape3 + tmp, shape1 - tmp, u1m); } Ix = (u > 0.5) ? pbeta(u1m, shape1, shape3, /*l._t.*/1, /*give_log*/0) : pbeta(u, shape3, shape1, /*l._t.*/0, /*give_log*/0); return R_pow(min, order) * sum / (gammafn(shape1) * gammafn(shape3)) + ACT_DLIM__0(limit, order) * Ix; } actuar/src/beta.c0000644000176200001440000000267315147745722013425 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to calculate raw and limited moments for the Beta * distribution. See ../R/BetaMoments.R for details. 
* * AUTHOR: Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mbeta(double order, double shape1, double shape2, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2)) return order + shape1 + shape2; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0) return R_NaN; if (order <= -shape1) return R_PosInf; return beta(shape1 + order, shape2) / beta(shape1, shape2); } double levbeta(double limit, double shape1, double shape2, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(order)) return limit + shape1 + shape2 + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0) return R_NaN; if (order <= -shape1) return R_PosInf; if (limit <= 0.0) return 0.0; double tmp = order + shape1; return beta(tmp, shape2) / beta(shape1, shape2) * pbeta(limit, tmp, shape2, 1, 0) + ACT_DLIM__0(limit, order) * pbeta(limit, shape1, shape2, 0, 0); } actuar/src/lgamma.c0000644000176200001440000000633115147745722013743 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, and to simulate random variates for the loggamma * distribution. See ../R/Loggamma.R for details. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dlgamma(double x, double shapelog, double ratelog, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shapelog) || ISNAN(ratelog)) return x + shapelog + ratelog; #endif if (!R_FINITE(shapelog) || !R_FINITE(ratelog) || shapelog <= 0.0 || ratelog < 0.0) return R_NaN;; if (!R_FINITE(x) || x < 1.0) return ACT_D__0; return ACT_D_exp(dgamma(log(x), shapelog, 1.0/ratelog, 1) - log(x)); } double plgamma(double q, double shapelog, double ratelog, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shapelog) || ISNAN(ratelog)) return q + shapelog + ratelog; #endif if (!R_FINITE(shapelog) || !R_FINITE(ratelog) || shapelog <= 0.0 || ratelog < 0.0) return R_NaN;; if (q <= 1.0) return ACT_DT_0; return pgamma(log(q), shapelog, 1.0/ratelog, lower_tail, log_p); } double qlgamma(double p, double shapelog, double ratelog, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shapelog) || ISNAN(ratelog)) return p + shapelog + ratelog; #endif if (!R_FINITE(shapelog) || !R_FINITE(ratelog) || shapelog <= 0.0 || ratelog <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, 1, R_PosInf); p = ACT_D_qIv(p); return exp(qgamma(p, shapelog, 1.0/ratelog, lower_tail, 0)); } double rlgamma(double shapelog, double ratelog) { if (!R_FINITE(shapelog) || !R_FINITE(ratelog) || shapelog <= 0.0 || ratelog <= 0.0) return R_NaN;; return exp(rgamma(shapelog, 1.0/ratelog)); } double mlgamma(double order, double shapelog, double ratelog, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shapelog) || ISNAN(ratelog)) return order + shapelog + ratelog; #endif if (!R_FINITE(shapelog) || !R_FINITE(ratelog) || !R_FINITE(order) || shapelog <= 0.0 || ratelog <= 0.0) return R_NaN; if (order >= ratelog) return R_PosInf; return R_pow(1.0 - order / ratelog, -shapelog); } double levlgamma(double limit, double shapelog, double ratelog, double order, int give_log) { #ifdef IEEE_754 
if (ISNAN(limit) || ISNAN(shapelog) || ISNAN(ratelog) || ISNAN(order)) return limit + shapelog + ratelog + order; #endif if (!R_FINITE(shapelog) || !R_FINITE(ratelog) || !R_FINITE(limit) || !R_FINITE(order) || shapelog <= 0.0 || ratelog <= 0.0 || limit <= 0.0) return R_NaN; if (order >= ratelog) return R_PosInf; if (limit <= 1.0) return 0.0; double u = log(limit); return R_pow(1.0 - order / ratelog, -shapelog) * pgamma(u * (ratelog - order), shapelog, 1.0, 1, 0) + ACT_DLIM__0(limit, order) * pgamma(u * ratelog, shapelog, 1.0, 0, 0); } actuar/src/poisinvgauss.c0000644000176200001440000001222015151206331015210 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, and to simulate random variates for the Poisson-inverse * gaussian distribution. See ../R/PoissonInverseGaussian.R for * details. * * We work with the density expressed as * * p(x) = sqrt(1/phi) sqrt(1/(pi/2)) exp(1/(phi mu))/x! * * [sqrt(2 phi (1 + (2 phi mu^2)^(-1)))]^(-(x - 0.5)) * * bessel_k(sqrt(2/phi (1 + (2 phi mu^2)^(-1))), x - 0.5) * * or, is essence, * * p(x) = A exp(1/(phi mu))/x! B^(-y) bessel_k(B/phi, y) * * The limiting case mu = Inf is handled "automatically" with terms * going to zero when mu is Inf. Specific code not worth it since the * function should rarely be evaluated with mu = Inf in practice. 
* * AUTHOR: Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dpoisinvgauss_raw(double x, double mu, double phi, int give_log) { /* Here assume that x is integer, 0 < x < Inf, mu > 0, 0 < phi < Inf */ int i; double p, pi1m, pi2m; double twophi = phi + phi; /* limiting case mu = Inf with simpler recursive formulas */ if (!R_FINITE(mu)) { p = -sqrt(2/phi); /* log p[0] */ if (x == 0.0) return ACT_D_exp(p); pi2m = exp(p); /* p[i - 2] = p[0]*/ p = p - (M_LN2 + log(phi))/2; /* log p[1] */ if (x == 1.0) return ACT_D_exp(p); pi1m = exp(p); /* p[i - 1] = p[1] */ for (i = 2; i <= x; i++) { p = (1 - 1.5/i) * pi1m + pi2m/twophi/(i * (i - 1)); pi2m = pi1m; pi1m = p; } return ACT_D_val(p); } /* else: "standard" case with mu < Inf */ double A, B; double mu2 = mu * mu; double twophimu2 = twophi * mu2; p = (1.0 - sqrt(1.0 + twophimu2))/phi/mu; /* log p[0] */ if (x == 0.0) return ACT_D_exp(p); pi2m = exp(p); /* p[i - 2] = p[0]*/ p = log(mu) + p - log1p(twophimu2)/2.0; /* log p[1] */ if (x == 1.0) return ACT_D_exp(p); pi1m = exp(p); /* p[i - 1] = p[1] */ A = 1.0/(1.0 + 1.0/twophimu2); /* constant in first term */ B = mu2/(1.0 + twophimu2); /* constant in second term */ for (i = 2; i <= x; i++) { p = A * (1 - 1.5/i) * pi1m + (B * pi2m)/(i * (i - 1)); pi2m = pi1m; pi1m = p; } return ACT_D_val(p); } double dpoisinvgauss(double x, double mu, double phi, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(mu) || ISNAN(phi)) return x + mu + phi; #endif if (mu <= 0.0 || phi <= 0.0) return R_NaN; ACT_D_nonint_check(x); if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return (x == 0) ? ACT_D__1 : ACT_D__0; return dpoisinvgauss_raw(x, mu, phi, give_log); } /* For ppoisinvgauss(), there does not seem to be algorithms much * more elaborate than successive computations of the probabilities. 
*/ double ppoisinvgauss(double q, double mu, double phi, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(mu) || ISNAN(phi)) return q + mu + phi; #endif if (mu <= 0.0 || phi <= 0.0) return R_NaN; if (q < 0) return ACT_DT_0; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return ACT_DT_1; if (!R_FINITE(q)) return ACT_DT_1; int x; double s = 0; for (x = 0; x <= q; x++) s += dpoisinvgauss_raw(x, mu, phi, /*give_log*/ 0); return ACT_DT_val(s); } /* For qpoisinvgauss() we mostly reuse the code from qnbinom() et al. * of R sources. From src/nmath/qnbinom.c: * * METHOD * * Uses the Cornish-Fisher Expansion to include a skewness * correction to a normal approximation. This gives an * initial value which never seems to be off by more than * 1 or 2. A search is then conducted of values close to * this initial start point. * * For the limiting case mu = Inf (that has no finite moments), we * use instead the quantile of an inverse chi-square distribution as * starting point. */ #define _thisDIST_ poisinvgauss #define _dist_PARS_DECL_ double mu, double phi #define _dist_PARS_ mu, phi #include "qDiscrete_search.h" /* do_search() et al. 
*/ double qpoisinvgauss(double p, double mu, double phi, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(mu) || ISNAN(phi)) return p + mu + phi; #endif if (mu <= 0.0 || phi <= 0.0) return R_NaN; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return 0.0; ACT_Q_P01_boundaries(p, 0, R_PosInf); double phim2 = phi * mu * mu, sigma2 = phim2 * mu + mu, sigma = sqrt(sigma2), gamma = (3 * phim2 * sigma2 + mu)/sigma2/sigma; /* limiting case mu = Inf -> inverse chi-square as starting point*/ /* other cases -> Cornish-Fisher as usual */ double z, y; if (!R_FINITE(mu)) y = ACT_forceint(1/phi/qchisq(p, 1, lower_tail, log_p)); else { z = qnorm(p, 0., 1., lower_tail, log_p); y = ACT_forceint(mu + sigma * (z + gamma * (z*z - 1) / 6)); } q_DISCRETE_BODY(); } double rpoisinvgauss(double mu, double phi) { if (mu <= 0.0 || phi <= 0.0) return R_NaN; return rpois(rinvgauss(mu, phi)); } actuar/src/norm.c0000644000176200001440000000272615147745722013464 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to calculate raw moments and the moment generating * function for the normal distribution. See ../R/NormalSupp.R for * details. 
* * AUTHORS: Christophe Dutang and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mnorm(double order, double mean, double sd, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(mean) || ISNAN(sd)) return order + mean + sd; #endif if (!R_FINITE(mean) || !R_FINITE(sd) || !R_FINITE(order) || sd <= 0.0 || ACT_nonint(order)) return R_NaN; /* Trivial case */ if (order == 0.0) return 1.0; /* Odd moments about 0 are equal to 0 */ if ((int) order % 2 == 1 && mean == 0.0) return 0.0; int i, n = order; double res = 0.0; for (i = 0; i <= n/2; i++) res += R_pow_di(sd, 2 * i) * R_pow_di(mean, n - 2 * i) / (R_pow_di(2.0, i) * gammafn(i + 1) * gammafn(order - 2.0 * i + 1.0)); return gammafn(order + 1.0) * res; } double mgfnorm(double t, double mean, double sd, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(mean) || ISNAN(sd)) return t + mean + sd; #endif if (!R_FINITE(mean) || !R_FINITE(sd) || sd <= 0.0) return R_NaN; if (t == 0.0) return ACT_D__1; return ACT_D_exp(t * mean + 0.5 * t * t * sd * sd) ; } actuar/src/invweibull.c0000644000176200001440000000643115147745722014666 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the inverse Weibull distribution. See ../R/InverseWeibull.R for * details. * * We work with the density expressed as * * shape * u * e^(-u) / x * * with u = (scale/x)^shape. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvweibull(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale < 0.0) return R_NaN;; /* handle also x == 0 here */ if (!R_FINITE(x) || x <= 0.0) return ACT_D__0; double logu = shape * (log(scale) - log(x)); return ACT_D_exp(log(shape) + logu - exp(logu) - log(x)); } double pinvweibull(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale < 0.0) return R_NaN;; if (q <= 0) return ACT_DT_0; double u = exp(shape * (log(scale) - log(q))); return ACT_DT_Eval(-u); } double qinvweibull(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + scale; #endif if (!R_FINITE(scale) || !R_FINITE(shape) || scale <= 0.0 || shape <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(-log(ACT_D_Lval(p)), -1.0/shape); } double rinvweibull(double shape, double scale) { if (!R_FINITE(scale) || !R_FINITE(shape) || scale <= 0.0 || shape <= 0.0) return R_NaN;; return scale * R_pow(rexp(1.0), -1.0/shape); } double minvweibull(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(scale) || !R_FINITE(shape) || !R_FINITE(order) || scale <= 0.0 || shape <= 0.0) return R_NaN; if (order >= shape) return R_PosInf; return R_pow(scale, order) * gammafn(1.0 - order / shape); } double levinvweibull(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if 
(ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(scale) || !R_FINITE(shape) || !R_FINITE(order) || scale <= 0.0 || shape <= 0.0) return R_NaN; if (order >= shape) return R_PosInf; if (limit <= 0.0) return 0.0; double u = exp(shape * (log(scale) - log(limit))); return R_pow(scale, order) * actuar_gamma_inc(1.0 - order/shape, u) + ACT_DLIM__0(limit, order) * (0.5 - exp(-u) + 0.5); } actuar/src/randomphtype.c0000644000176200001440000001050415147745722015214 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to generate variates of phase-type distributions. This * file is based on random.c with the following modifications: * * 1. support for a matrix argument; * 2. no iteration over the parameters; * 3. support for two parameter distributions only; * 4. no support for integer random variates. * * For details, see random.c. * * AUTHOR: Vincent Goulet */ #include #include #include #include "actuar.h" #include "locale.h" /* Prototypes of auxiliary function */ static Rboolean randomphtype2(double (*f)(double *, double **, double *, int), double *, double *, int, double *, int); static Rboolean randomphtype2(double (*f)(double *, double **, double *, int), double *a, double *b, int na, double *x, int n) { int i, j; double *rates, **Q; Rboolean naflag = FALSE; /* The sub-intensity matrix and initial probability vector never * change, so compute the transition matrix of the underlying * Markov chain and the vector of rate parameters before * looping. 
*/ rates = (double *) R_alloc(na, sizeof(double)); Q = (double **) R_alloc(na, sizeof(double)); for (i = 0; i < na; i++) { Q[i] = (double *) S_alloc(na, sizeof(double)); rates[i] = -b[i * (na + 1)]; for (j = 0; j < na; j++) if (i != j) Q[i][j] = b[i + j * na] / rates[i]; } for (i = 0; i < n; i++) { x[i] = f(a, Q, rates, na); if (!R_FINITE(x[i])) naflag = TRUE; } return(naflag); } #define RANDPHTYPE2(num, fun) \ case num: \ randomphtype2(fun, REAL(a), REAL(b), na, REAL(x), n); \ break /* The function below retains a 'type' argument that is not actually * used. This is to fit within the scheme of the other random * generation functions of random.c and names.c. */ SEXP actuar_do_randomphtype2(int code, SEXP args, SEXPTYPE type /* unused */) { SEXP x, a, b, bdims; int i, n, na, nrow, ncol; Rboolean naflag = FALSE; /* Check validity of arguments */ if (!isVector(CAR(args)) || !isNumeric(CADR(args)) || !isMatrix(CADDR(args))) error(_("invalid arguments")); /* Number of variates to generate */ if (LENGTH(CAR(args)) == 1) { n = asInteger(CAR(args)); if (n == NA_INTEGER || n < 0) error(_("invalid arguments")); } else n = LENGTH(CAR(args)); /* If n == 0, return numeric(0) */ PROTECT(x = allocVector(REALSXP, n)); if (n == 0) { UNPROTECT(1); return(x); } /* Sanity checks of arguments. */ PROTECT(a = coerceVector(CADR(args), REALSXP)); PROTECT(b = coerceVector(CADDR(args), REALSXP)); bdims = getAttrib(b, R_DimSymbol); nrow = INTEGER(bdims)[0]; ncol = INTEGER(bdims)[1]; if (nrow != ncol) error(_("non-square sub-intensity matrix")); na = LENGTH(a); if (na != nrow) error(_("non-conformable arguments")); /* If length of parameters < 1, or either of the two parameters * is NA return NA. 
*/ if (na < 1 || (na == 1 && !(R_FINITE(REAL(a)[0]) && R_FINITE(REAL(b)[0])))) { for (i = 0; i < n; i++) REAL(x)[i] = NA_REAL; } /* Otherwise, dispatch to appropriate r* function */ else { naflag = FALSE; GetRNGstate(); switch (code) { RANDPHTYPE2(1, rphtype); default: error(_("internal error in actuar_do_randomphtype2")); } if (naflag) warning(R_MSG_NA); PutRNGstate(); } UNPROTECT(3); return x; } /* Main function, the only one used by .External(). */ SEXP actuar_do_randomphtype(SEXP args) { int i; const char *name; /* Extract distribution name */ args = CDR(args); name = CHAR(STRING_ELT(CAR(args), 0)); /* Dispatch to actuar_do_random{1,2,3,4} */ for (i = 0; random_tab[i].name; i++) if (!strcmp(random_tab[i].name, name)) return random_tab[i].cfun(random_tab[i].code, CDR(args), random_tab[i].type); /* No dispatch is an error */ error(_("internal error in actuar_do_randomphtype")); return args; /* never used; to keep -Wall happy */ } actuar/src/betaint.c0000644000176200001440000001221315147745722014127 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Function to compute the integral * * B(a, b; x) = gammafn(a + b) int_0^x t^(a-1) (1-t)^(b-1) dt * * for a > 0, b != -1, -2, ... and 0 < x < 1. When b > 0, * * B(a, b; x) = gammafn(a) gammafn(b) pbeta(x, a, b). * * When b < 0 and b != -1, -2, ... and a > 1 + floor(-b), * * B(a, b; x) * = -gammafn(a + b) {(x^(a-1) (1-x)^b)/b * + [(a-1) x^(a-2) (1-x)^(b+1)]/[b(b+1)] * + ... * + [(a-1)...(a-r) x^(a-r-1) (1-x)^(b+r)]/[b(b+1)...(b+r)]} * + [(a-1)...(a-r-1)]/[b(b+1)...(b+r)] gammafn(a-r-1) * * gammafn(b+r+1) pbeta(x, a-r-1, b+r+1) * * See Appendix A of Klugman, Panjer & Willmot, Loss Models, * Fourth Edition, Wiley, 2012 for the formula. * * AUTHOR: Vincent Goulet */ #include #include #include #include "actuar.h" #include "dpq.h" #include "locale.h" double betaint_raw(double x, double a, double b, double x1m) { /* Here, assume that (x, a, b) are not NA, 0 < x < 1 and 0 < a < Inf. 
*/ if (b > 0) { /* I(x, a, b) = 1 - I(1 - x, b, a) */ double Ix = (x > 0.5) ? pbeta(x1m, b, a, /*l._t.*/0, /*give_log*/0) : pbeta(x, a, b, /*l._t.*/1, /*give_log*/0); return gammafn(a) * gammafn(b) * Ix; } double r = floor(-b); if (! (ACT_nonint(b) && a - r - 1 > 0)) return R_NaN; /* There are two quantities to accumulate in order to compute the * final result: the alternating sum (to be stored in 'sum') and * the ratio [(a - 1) ... (a - r)]/[b(b + 1) ... (b + r)] (to be * stored in 'ratio'). Some calculations are done in the log * scale. */ int i; double ap = a, bp = b; /* copies of a and b */ double lx = log(x); /* log(x) */ double lx1m = log(x1m); /* log(1 - x) */ double x1 = exp(lx1m - lx); /* (1 - x)/x */ double c, tmp, sum, ratio; /* Computation of the first term in the alternating sum. */ ap--; /* a - 1 */ c = exp(ap * lx + bp * lx1m)/bp; /* (x^(a - 1) (1 - x)^b) / b */ sum = c; /* first term */ ratio = 1/bp; /* 1 / b */ bp++; /* b + 1 */ /* Other terms in the alternating sum iff r > 0. * Relies on the fact that each new term in the sum is * * [previous term] * (a - i - 1)(1 - x)/[(b + i + 1) x] * * for i = 0, ..., r - 1. We need to compute this value as * * {[previous term] * [(1 - x)/x]} * [(a - i - 1)/(b + i + 1)] * * to preserve accuracy for very small values of x (near * DBL_MIN). */ for (i = 0; i < r; i++) { tmp = ap/bp; /* (a - i - 1)/(b + i + 1) */ c = tmp * (c * x1); /* new term in the sum */ sum += c; ratio *= tmp; ap--; bp++; } /* I(x, a, b) = 1 - I(1 - x, b, a) */ double lIx = (x > 0.5) ? 
pbeta(x1m, bp, ap, /*l._t.*/0, /*give_log*/1) : pbeta(x, ap, bp, /*l._t.*/1, /*give_log*/1); return(-gammafn(a + b) * sum + (ratio * ap) * exp(lgammafn(ap) + lgammafn(bp) + lIx)); } /* The frontend called by actuar_do_betaint() */ double betaint(double x, double a, double b) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(a) || ISNAN(b)) return x + a + b; #endif if (!R_FINITE(a)) return(R_PosInf); if (a <= 0 || x <= 0 || x >= 1) return R_NaN; return betaint_raw(x, a, b, 0.5 - x + 0.5); } /* * R TO C INTERFACE * * This is a streamlined version of the scheme in dpq.c * */ #define mod_iterate2(n1, n2, n3, i1, i2, i3) \ for (i = i1 = i2 = i3 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 0 : i2, \ i3 = (++i3 == n3) ? 0 : i3, \ ++i) /* Function called by .External() */ SEXP actuar_do_betaint(SEXP args) { SEXP sx, sa, sb, sy; R_xlen_t i, ix, ia, ib, n, nx, na, nb; double xi, ai, bi, *x, *a, *b, *y; Rboolean naflag = FALSE; args = CDR(args); /* drop function name from arguments */ if (!isNumeric(CAR(args))|| !isNumeric(CADR(args)) || !isNumeric(CADDR(args))) error(_("invalid arguments")); nx = XLENGTH(CAR(args)); na = XLENGTH(CADR(args)); nb = XLENGTH(CADDR(args)); if ((nx == 0) || (na == 0) || (nb == 0)) return(allocVector(REALSXP, 0)); n = nx; if (n < na) n = na; if (n < nb) n = nb; PROTECT(sx = coerceVector(CAR(args), REALSXP)); PROTECT(sa = coerceVector(CADR(args), REALSXP)); PROTECT(sb = coerceVector(CADDR(args), REALSXP)); PROTECT(sy = allocVector(REALSXP, n)); x = REAL(sx); a = REAL(sa); b = REAL(sb); y = REAL(sy); mod_iterate2(nx, na, nb, ix, ia, ib) { xi = x[ix]; ai = a[ia]; bi = b[ib]; if (ISNA(xi) || ISNA(ai) || ISNA(bi)) y[i] = NA_REAL; else if (ISNAN(xi) || ISNAN(ai) || ISNAN(bi)) y[i] = R_NaN; else { y[i] = betaint(xi, ai, bi); if (ISNAN(y[i])) naflag = TRUE; } } if (naflag) warning(R_MSG_NA); if (n == nx) SHALLOW_DUPLICATE_ATTRIB(sy, sx); else if (n == na) SHALLOW_DUPLICATE_ATTRIB(sy, sa); else if (n == nb) SHALLOW_DUPLICATE_ATTRIB(sy, sb); 
UNPROTECT(4); return sy; } actuar/src/hierarc.c0000644000176200001440000001776015147745722014132 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Function to compute the iterative part of function cm, used * to deal with credibility models. * * AUTHORS: Tommy Ouellet, Vincent Goulet */ #include #include #include #include "locale.h" #define CAD5R(e) CAR(CDR(CDR(CDR(CDR(CDR(e)))))) #define CAD6R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(e))))))) #define CAD7R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e)))))))) #define CAD8R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e))))))))) #define CAD9R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e)))))))))) #define abs(x) ((x) >= 0 ? (x) : -(x)) #define weights(i, j) (cred[i][j] != 0 ? cred[i][j] : tweights[i + 1][j]) SEXP toSEXP(double *x, int size) { SEXP ans = allocVector(REALSXP, size); memcpy(REAL(ans), x, size * sizeof(double)); return ans; } SEXP actuar_do_hierarc(SEXP args) { SEXP s_cred, s_tweights, s_wmeans, s_fnodes, denoms, b, tol, maxit, echo; double **cred, **tweights, **wmeans, diff, bw; int **fnodes, nlevels, i, j, k, count = 0; /* All values received from R are protected. */ PROTECT(s_cred = coerceVector(CADR(args), VECSXP)); PROTECT(s_tweights = coerceVector(CADDR(args), VECSXP)); PROTECT(s_wmeans = coerceVector(CADDDR(args), VECSXP)); PROTECT(s_fnodes = coerceVector(CAD4R(args), VECSXP)); PROTECT(denoms = coerceVector(CAD5R(args), REALSXP)); PROTECT(b = coerceVector(CAD6R(args), REALSXP)); PROTECT(tol = coerceVector(CAD7R(args), REALSXP)); PROTECT(maxit = coerceVector(CAD8R(args), INTSXP)); PROTECT(echo = coerceVector(CAD9R(args), LGLSXP)); /* Initialization of some variables */ double bt[length(b)]; /* previous values of 'b' */ nlevels = length(b) - 1; /* number of levels in the model */ bt[nlevels] = REAL(b)[nlevels]; /* within entity variance; never * changes. 
*/ int size[nlevels + 1]; /* total number of nodes at each * level, including the portfolio level */ size[0] = 1; for (i = 1; i <= nlevels; i++) size[i] = length(VECTOR_ELT(s_fnodes, i - 1)); /* Allocation of arrays that will be needed below. */ cred = (double **) R_alloc(nlevels, sizeof(double *)); tweights = (double **) R_alloc(nlevels + 1, sizeof(double *)); wmeans = (double **) R_alloc(nlevels + 1, sizeof(double *)); fnodes = (int **) R_alloc(nlevels, sizeof(int *)); tweights[0] = (double *) R_alloc(size[0], sizeof(double)); wmeans[0] = (double *) R_alloc(size[0], sizeof(double)); for (i = 1; i <= nlevels; i++) { cred[i - 1] = (double *) R_alloc(size[i], sizeof(double)); tweights[i] = (double *) R_alloc(size[i], sizeof(double)); wmeans[i] = (double *) R_alloc(size[i], sizeof(double)); fnodes[i - 1] = (int *) R_alloc(size[i], sizeof(int)); } /* Get values of fnodes, tweights and wmeans from R lists. For * the latter two, only the entity level values are initialized * in R or meaningful. */ for (i = 0; i < nlevels; i++) memcpy(fnodes[i], INTEGER(VECTOR_ELT(s_fnodes, i)), size[i + 1] * sizeof(int)); memcpy(tweights[nlevels], REAL(VECTOR_ELT(s_tweights, nlevels)), size[nlevels] * sizeof(double)); memcpy(wmeans[nlevels], REAL(VECTOR_ELT(s_wmeans, nlevels)), size[nlevels] * sizeof(double)); /* If printing of iterations was asked for, start by printing a * header and the starting values. */ if (LOGICAL(echo)[0]) { Rprintf("Iteration\tVariance estimates\n %d\t\t", count); for (i = 0; i < nlevels; i++) Rprintf(" %.8g ", REAL(b)[i]); Rprintf("\n"); } /* Iterative part. */ do { /* Stop after 'maxit' iterations and issue warning. */ if (++count > INTEGER(maxit)[0]) { warning(_("maximum number of iterations reached before obtaining convergence")); break; } /* Copy the previous values of 'b'. */ for (i = 0; i < nlevels; i++) bt[i] = REAL(b)[i]; /* Run through all levels from lowest to highest. 
*/ for (i = nlevels - 1; i >= 0; i--) { /* Reset the total weights and weighted averages. */ for (j = 0; j < size[i]; j++) { tweights[i][j] = 0; wmeans[i][j] = 0; } /* Find the first non-zero within variance estimator. */ for (j = 1; REAL(b)[i + j] == 0; j++); bw = REAL(b)[i + j]; /* Calculation of the new credibility factors, total * weights and (numerators of) weighted averages. */ for (j = 0; j < size[i + 1]; j++) { cred[i][j] = 1.0/(1.0 + bw / (REAL(b)[i] * tweights[i + 1][j])); k = fnodes[i][j] - 1; /* C version of tapply(). */ tweights[i][k] += weights(i, j); wmeans[i][k] += weights(i, j) * wmeans[i + 1][j]; } /* Final calculation of weighted averages with the * division by the total weight. */ for (j = 0; j < size[i]; j++) { if (tweights[i][j] > 0) wmeans[i][j] = wmeans[i][j] / tweights[i][j]; else wmeans[i][j] = 0; } /* Calculation of the new current level variance estimator * only if the previous one is strictly positive. */ if (bt[i] > 0) { REAL(b)[i] = 0; for (j = 0; j < size[i + 1]; j++) { k = fnodes[i][j]; REAL(b)[i] += weights(i, j) * R_pow_di(wmeans[i + 1][j] - wmeans[i][k - 1], 2); } REAL(b)[i] = REAL(b)[i] / REAL(denoms)[i]; /* Set the estimator to 0 if it is close enough to 0 * and henceforth stop iterations on this * parameter. */ if (REAL(b)[i] <= R_pow_di(REAL(tol)[0], 2)) REAL(b)[i] = 0; } /* Recompute the credibility factors, total weights and * weighted means with the latest between variance * estimator. 
*/ for (j = 0; j < size[i]; j++) { tweights[i][j] = 0; wmeans[i][j] = 0; } for (j = 0; j < size[i + 1]; j++) { cred[i][j] = 1.0/(1.0 + bw / (REAL(b)[i] * tweights[i + 1][j])); k = fnodes[i][j] - 1; tweights[i][k] += weights(i, j); wmeans[i][k] += weights(i, j) * wmeans[i + 1][j]; } for (j = 0; j < size[i]; j++) { if (tweights[i][j] > 0) wmeans[i][j] = wmeans[i][j] / tweights[i][j]; else wmeans[i][j] = 0; } } /* Trace */ if (LOGICAL(echo)[0]) { Rprintf(" %d\t\t", count); for (i = 0; i < nlevels; i++) Rprintf(" %.8g ", REAL(b)[i]); Rprintf("\n"); } /* Computation of the largest difference between two * iterations. Estimators set to 0 are not taken into * account. */ diff = 0; for (i = 0; i < nlevels; i++) if (REAL(b)[i] > 0) diff = fmax2(abs(REAL(b)[i] - bt[i])/bt[i], diff); } while (diff >= REAL(tol)[0]); /* Copy the final values to R lists. */ SET_VECTOR_ELT(s_tweights, 0, toSEXP(tweights[0], size[0])); SET_VECTOR_ELT(s_wmeans, 0, toSEXP(wmeans[0], size[0])); for (i = 1; i <= nlevels; i++) { SET_VECTOR_ELT(s_cred, i - 1, toSEXP(cred[i - 1], size[i])); SET_VECTOR_ELT(s_tweights, i, toSEXP(tweights[i], size[i])); SET_VECTOR_ELT(s_wmeans, i, toSEXP(wmeans[i], size[i])); } UNPROTECT(9); return(R_NilValue); } actuar/src/panjer.c0000644000176200001440000001472415147745722013771 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Function to compute the recursive part of the Panjer formula * to approximate the aggregate claim amount distribution of * a portfolio over a period. 
* * AUTHORS: Tommy Ouellet, Vincent Goulet */ #include #include #include #include "locale.h" #define CAD5R(e) CAR(CDR(CDR(CDR(CDR(CDR(e)))))) #define CAD6R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(e))))))) #define CAD7R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e)))))))) #define CAD8R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e))))))))) #define CAD9R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e)))))))))) #define CAD10R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e))))))))))) #define INITSIZE 100 /* default size for prob. vector */ SEXP actuar_do_panjer(SEXP args) { SEXP p0, p1, fs0, sfx, a, b, conv, tol, maxit, echo, sfs; double *fs, *fx, cumul; int upper, m, k, n, x = 1; double norm; /* normalizing constant */ double term; /* constant in the (a, b, 1) case */ /* The length of vector fs is not known in advance. We opt for a * simple scheme: allocate memory for a vector of size 'size', * double the size when the vector is full. */ int size = INITSIZE; fs = (double *) S_alloc(size, sizeof(double)); /* All values received from R are then protected. 
*/ PROTECT(p0 = coerceVector(CADR(args), REALSXP)); PROTECT(p1 = coerceVector(CADDR(args), REALSXP)); PROTECT(fs0 = coerceVector(CADDDR(args), REALSXP)); PROTECT(sfx = coerceVector(CAD4R(args), REALSXP)); PROTECT(a = coerceVector(CAD5R(args), REALSXP)); PROTECT(b = coerceVector(CAD6R(args), REALSXP)); PROTECT(conv = coerceVector(CAD7R(args), INTSXP)); PROTECT(tol = coerceVector(CAD8R(args), REALSXP)); PROTECT(maxit = coerceVector(CAD9R(args), INTSXP)); PROTECT(echo = coerceVector(CAD10R(args), LGLSXP)); /* Initialization of some variables */ fx = REAL(sfx); /* severity distribution */ upper = length(sfx) - 1; /* severity distribution support upper bound */ fs[0] = REAL(fs0)[0]; /* value of Pr[S = 0] (computed in R) */ cumul = REAL(fs0)[0]; /* cumulative probability computed */ norm = 1 - REAL(a)[0] * fx[0]; /* normalizing constant */ n = INTEGER(conv)[0]; /* number of convolutions to do */ /* If printing of recursions was asked for, start by printing a * header and the probability at 0. */ if (LOGICAL(echo)[0]) Rprintf("x\tPr[S = x]\tCumulative probability\n%d\t%.8g\t%.8g\n", 0, fs[0], fs[0]); /* (a, b, 0) case (if p0 is NULL) */ if (isNull(CADR(args))) do { /* Stop after 'maxit' recursions and issue warning. 
*/ if (x > INTEGER(maxit)[0]) { warning(_("maximum number of recursions reached before the probability distribution was complete")); break; } /* If fs is too small, double its size */ if (x >= size) { fs = (double *) S_realloc((char *) fs, size << 1, size, sizeof(double)); size = size << 1; } m = x; if (x > upper) m = upper; /* upper bound of the sum */ /* Compute probability up to the scaling constant */ for (k = 1; k <= m; k++) fs[x] += (REAL(a)[0] + REAL(b)[0] * k / x) * fx[k] * fs[x - k]; fs[x] = fs[x]/norm; /* normalization */ cumul += fs[x]; /* cumulative sum */ if (LOGICAL(echo)[0]) Rprintf("%d\t%.8g\t%.8g\n", x, fs[x], cumul); x++; } while (cumul < REAL(tol)[0]); /* (a, b, 1) case (if p0 is non-NULL) */ else { /* In the (a, b, 1) case, the recursion formula has an * additional term involving f_X(x). The mathematical notation * assumes that f_X(x) = 0 for x > m (the maximal value of the * distribution). We need to treat this specifically in * programming, though. */ double fxm; /* Constant term in the (a, b, 1) case. */ term = (REAL(p1)[0] - (REAL(a)[0] + REAL(b)[0]) * REAL(p0)[0]); do { /* Stop after 'maxit' recursions and issue warning. */ if (x > INTEGER(maxit)[0]) { warning(_("maximum number of recursions reached before the probability distribution was complete")); break; } if (x >= size) { fs = (double *) S_realloc((char *) fs, size << 1, size, sizeof(double)); size = size << 1; } m = x; if (x > upper) { m = upper; /* upper bound of the sum */ fxm = 0.0; /* i.e. no additional term */ } else fxm = fx[m]; /* i.e. additional term */ for (k = 1; k <= m; k++) fs[x] += (REAL(a)[0] + REAL(b)[0] * k / x) * fx[k] * fs[x - k]; fs[x] = (fs[x] + fxm * term) / norm; cumul += fs[x]; if (LOGICAL(echo)[0]) Rprintf("%d\t%.8g\t%.8g\n", x, fs[x], cumul); x++; } while (cumul < REAL(tol)[0]); } /* If needed, convolve the distribution obtained above with itself * using a very simple direct technique. 
Since we want to * continue storing the distribution in array 'fs', we need to * copy the vector in an auxiliary array at each convolution. */ if (n) { int i, j, ox; double *ofs; /* auxiliary array */ /* Resize 'fs' to its final size after 'n' convolutions. Each * convolution increases the length from 'x' to '2 * x - 1'. */ fs = (double *) S_realloc((char *) fs, (1 << n) * (x - 1) + 1, size, sizeof(double)); /* Allocate enough memory in the auxiliary array for the 'n' * convolutions. This is just slightly over half the final * size of 'fs'. */ ofs = (double *) S_alloc((1 << (n - 1)) * (x - 1) + 1, sizeof(double)); for (k = 0; k < n; k++) { memcpy(ofs, fs, x * sizeof(double)); /* keep previous array */ ox = x; /* previous array length */ x = (x << 1) - 1; /* new array length */ for(i = 0; i < x; i++) fs[i] = 0.0; for(i = 0; i < ox; i++) for(j = 0; j < ox; j++) fs[i + j] += ofs[i] * ofs[j]; } } /* Copy the values of fs to a SEXP which will be returned to R. */ PROTECT(sfs = allocVector(REALSXP, x)); memcpy(REAL(sfs), fs, x * sizeof(double)); UNPROTECT(11); return(sfs); } actuar/src/zmgeom.c0000644000176200001440000000753115151206331013765 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-modified geometric distribution. See * ../R/ZeroModifiedGeometric.R for details. * * Let X ~ Geometric(prob). The probability mass function of the * zero-modified Geometric random variable Z is * * Pr[Z = 0] = p0m * Pr[Z = x] = (1 - p0m) * Pr[X = x]/(1 - prob), x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = 1 - (1 - p0m) * (1 - Pr[X <= x])/(1 - prob). * * Limiting case: prob == 1 has mass (1 - p0m) at x = 1. 
* * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dzmgeom(double x, double prob, double p0m, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob) || ISNAN(p0m)) return x + prob + p0m; #endif if (prob <= 0 || prob > 1 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0 || !R_FINITE(x)) return ACT_D__0; if (x == 0) return ACT_D_val(p0m); /* NOTE: from now on x > 0 */ /* limiting case as prob -> 1 is point mass (1 - p0m) at one */ if (prob == 1) return (x == 1) ? ACT_D_Clog(p0m) : ACT_D__0; return ACT_D_val((1 - p0m) * dgeom(x - 1, prob, /*give_log*/0)); } double pzmgeom(double x, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob) || ISNAN(p0m)) return x + prob + p0m; #endif if (prob <= 0 || prob > 1 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; if (x < 1) return ACT_DT_val(p0m); /* NOTE: from now on x >= 1 */ /* limiting case as prob -> 1 is mass (1 - p0m) at one */ if (prob == 1) return ACT_DT_1; /* working in log scale improves accuracy */ return ACT_DT_CEval(log1p(-p0m) + pgeom(x - 1, prob, /*l._t.*/0, /*log_p*/1)); } double qzmgeom(double p, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(prob) || ISNAN(p0m)) return p + prob + p0m; #endif if (prob <= 0 || prob > 1 || p0m < 0 || p0m > 1) return R_NaN; ACT_Q_P01_check(p); if (p0m == 1) return 0.0; /* limiting case as prob -> 1 is mass (1 - p0m) at one */ if (prob == 1) return (ACT_DT_qIv(p) <= p0m) ? ACT_Q_p0lim(p0m) : 1.0; if (p == ACT_DT_0) return ACT_Q_p0lim(p0m); if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); /* at this point 0 < p < 1, so p0m = 0 is not an issue */ /* working in log scale improves accuracy */ return qgeom(-expm1(log1p(-prob) - log1p(-p0m) + log1p(-p)), prob, /*l._t.*/1, /*log_p*/0); } /* ALGORITHM FOR GENERATION OF RANDOM VARIATES * * 1. 
p0m >= p0: just simulate variates from the discrete mixture. * * 2. p0m < p0: fastest method depends p0m. * * 2.1 p0m < ACT_INVERSION: inversion method on a restricted range. * * 2.2 p0m >= ACT_INVERSION: simulate variates from discrete mixture * with the corresponding zero truncated distribution. * * The threshold ACT_INVERSION is distribution specific. */ #define ACT_INVERSION 0.4 double rzmgeom(double prob, double p0m) { if (!R_FINITE(prob) || prob <= 0 || prob > 1 || p0m < 0 || p0m > 1) return R_NaN; /* limiting case as p -> 1 is mass (1 - p0m) at one */ if (prob == 1) return (unif_rand() <= p0m) ? 0.0 : 1.0; /* p0m >= prob: generate from mixture */ if (p0m >= prob) return (unif_rand() * (1 - prob) < (1 - p0m)) ? rgeom(prob) : 0.0; /* inversion method */ if (p0m < ACT_INVERSION) return qgeom(runif((prob - p0m)/(1 - p0m), 1), prob, 1, 0); /* generate from zero truncated mixture */ return (unif_rand() <= p0m) ? 0.0 : 1 + rpois(exp_rand() * ((1 - prob) / prob)); } actuar/src/trgamma.c0000644000176200001440000001045215147745722014134 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the transformed gamma distribution. See ../R/TransformedGamma.R * for details. * * We work with the density expressed as * * shape2 * u^shape1 * e^(-u) / (x * gamma(shape1)) * * with u = (x/scale)^shape2. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dtrgamma(double x, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return x + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape1 * shape2 < 1) return R_PosInf; if (shape1 * shape2 > 1) return ACT_D__0; /* else */ return give_log ? log(shape2) - log(scale) - lgammafn(shape1) : shape2 / (scale * gammafn(shape1)); } double logu = shape2 * (log(x) - log(scale)); return ACT_D_exp(log(shape2) + shape1 * logu - exp(logu) - log(x) - lgammafn(shape1)); } double ptrgamma(double q, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return q + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(shape2 * (log(q) - log(scale))); return pgamma(u, shape1, 1.0, lower_tail, log_p); } double qtrgamma(double p, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return p + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(qgamma(p, shape1, 1.0, lower_tail, 0), 1.0/shape2); } double rtrgamma(double shape1, double shape2, double scale) { if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; return scale * 
R_pow(rgamma(shape1, 1.0), 1.0/shape2); } double mtrgamma(double order, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return order + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape1 * shape2) return R_PosInf; return R_pow(scale, order) * gammafn(shape1 + order/shape2) / gammafn(shape1); } double levtrgamma(double limit, double shape1, double shape2, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape1 * shape2) return R_PosInf; if (limit <= 0.0) return 0.0; double u, tmp; tmp = shape1 + order / shape2; u = exp(shape2 * (log(limit) - log(scale))); return R_pow(scale, order) * gammafn(tmp) * pgamma(u, tmp, 1.0, 1, 0) / gammafn(shape1) + ACT_DLIM__0(limit, order) * pgamma(u, shape1, 1.0, 0, 0); } actuar/src/ztpois.c0000644000176200001440000000467215151206331014022 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-truncated Poisson distribution. See * ../R/ZeroTruncatedPoisson.R for details. * * Let X ~ Poisson(lambda). The probability mass function of the * zero-truncated Poisson random variable Z is * * Pr[Z = 0] = 0 * Pr[Z = x] = Pr[X = x]/(1 - exp(-lambda)), x = 1, 2, ... 
* * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = (Pr[X <= x] - exp(-lambda))/(1 - exp(-lambda)) * * Limiting case: lambda == 0 is point mass at x = 1. * * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dztpois(double x, double lambda, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(lambda)) return x + lambda; #endif if (lambda < 0) return R_NaN; if (x < 1 || !R_FINITE(x)) return ACT_D__0; /* limiting case as lambda -> 0 is point mass at one */ if (lambda == 0) return (x == 1) ? ACT_D__1 : ACT_D__0; return ACT_D_exp(dpois(x, lambda, /*give_log*/1) - log1mexp(lambda)); } double pztpois(double x, double lambda, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(lambda)) return x + lambda; #endif if (lambda < 0) return R_NaN; if (x < 1) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; /* limiting case as lambda -> 0 is point mass at one */ if (lambda == 0) return (x >= 1) ? ACT_DT_1 : ACT_DT_0; return ACT_DT_Cval(ppois(x, lambda, /*l._t.*/0, /*log_p*/0)/(-expm1(-lambda))); } double qztpois(double p, double lambda, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(lambda)) return p + lambda; #endif if (lambda < 0 || !R_FINITE(lambda)) return R_NaN; ACT_Q_P01_check(p); /* limiting case as lambda -> 0 is point at one */ if (lambda == 0) return 1.0; if (p == ACT_DT_0) return 1.0; if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); double p0c = -expm1(-lambda); return qpois(p0c * p + (1 - p0c), lambda, /*l._t.*/1, /*log_p*/0); } double rztpois(double lambda) { if (lambda < 0 || !R_FINITE(lambda)) return R_NaN; /* limiting case as lambda -> 0 is point mass at one */ if (lambda == 0) return 1.0; return qpois(runif(exp(-lambda), 1), lambda, 1, 0); } actuar/src/util.c0000644000176200001440000003013615147745722013462 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Various utility functions for matrix algebra and 
sampling from * discrete distributions. * * The functions therein use LAPACK and BLAS routines. Nicely * formatted man pages for these can be found at * * * * AUTHORS: Vincent Goulet , Christophe * Dutang */ #define USE_FC_LEN_T #include #include #include #include #ifndef FCONE # define FCONE #endif #include "locale.h" /* For matrix exponential calculations. Pade constants * * n_{pqj} = [(p + q - j)! p!]/[(p + q)! j! (p - j)!] * * and * * d_{pqj} = [(p + q - j)! q!]/[(p + q)! j! (q - j)!] * * for p = q = 8 and j = 1, ..., 8. */ const static double padec88 [] = { 5.0000000000000000e-1, 1.1666666666666667e-1, 1.6666666666666667e-2, 1.6025641025641026e-3, 1.0683760683760684e-4, 4.8562548562548563e-6, 1.3875013875013875e-7, 1.9270852604185938e-9, }; /* Matrix exponential exp(x), where x is an (n x n) matrix. Result z * is an (n x n) matrix. Mostly lifted from the core of function * expm() of package Matrix, which is itself based on the function of * the same name in Octave. */ void actuar_expm(double *x, int n, double *z) { if (n == 1) z[0] = exp(x[0]); /* scalar exponential */ else { /* Constants */ int i, j; int nsqr = n * n, np1 = n + 1, is_uppertri = TRUE; int iloperm, ihiperm, iloscal, ihiscal, info, sqrpowscal; double infnorm, trshift, one = 1.0, zero = 0.0, m1pj = -1; /* Arrays */ int *pivot = (int *) R_alloc(n, sizeof(int)); /* pivot vector */ int *invperm = (int *) R_alloc(n, sizeof(int)); /* inverse permutation vector */ double *perm = (double *) R_alloc(n, sizeof(double)); /* permutation array */ double *scale = (double *) R_alloc(n, sizeof(double)); /* scale array */ double *work = (double *) R_alloc(nsqr, sizeof(double)); /* workspace array */ double *npp = (double *) R_alloc(nsqr, sizeof(double)); /* num. power Pade */ double *dpp = (double *) R_alloc(nsqr, sizeof(double)); /* denom. power Pade */ R_CheckStack(); Memcpy(z, x, nsqr); /* Check if matrix x is upper triangular; stop checking as * soon as a non-zero value is found below the diagonal. 
*/ for (i = 0; i < n - 1 && is_uppertri; i++) for (j = i + 1; j < n; j++) if (!(is_uppertri = x[i * n + j] == 0.0)) break; /* Step 1 of preconditioning: shift diagonal by average * diagonal if positive. */ trshift = 0.0; for (i = 0; i < n; i++) trshift += x[i * np1]; trshift /= n; /* average diagonal element */ if (trshift > 0.0) for (i = 0; i < n; i++) z[i * np1] -= trshift; /* Step 2 of preconditioning: balancing with dgebal. */ if (is_uppertri) { /* no need to permute if x is upper triangular */ iloperm = 1; ihiperm = n; } else { F77_CALL(dgebal)("P", &n, z, &n, &iloperm, &ihiperm, perm, &info FCONE); if (info) error(_("LAPACK routine dgebal returned info code %d when permuting"), info); } F77_CALL(dgebal)("S", &n, z, &n, &iloscal, &ihiscal, scale, &info FCONE); if (info) error(_("LAPACK routine dgebal returned info code %d when scaling"), info); /* Step 3 of preconditioning: Scaling according to infinity * norm (a priori always needed). */ infnorm = F77_CALL(dlange)("I", &n, &n, z, &n, work FCONE); sqrpowscal = (infnorm > 0) ? imax2((int) 1 + log(infnorm)/M_LN2, 0) : 0; if (sqrpowscal > 0) { double scalefactor = R_pow_di(2, sqrpowscal); for (i = 0; i < nsqr; i++) z[i] /= scalefactor; } /* Pade approximation (p = q = 8): compute x^8, x^7, x^6, * ..., x^1 */ for (i = 0; i < nsqr; i++) { npp[i] = 0.0; dpp[i] = 0.0; } for (j = 7; j >= 0; j--) { /* npp = z * npp + padec88[j] * z */ F77_CALL(dgemm) ("N", "N", &n, &n, &n, &one, z, &n, npp, &n, &zero, work, &n FCONE FCONE); /* npp <- work + padec88[j] * z */ for (i = 0; i < nsqr; i++) npp[i] = work[i] + padec88[j] * z[i]; /* dpp = z * dpp + (-1)^j * padec88[j] * z */ F77_CALL(dgemm) ("N", "N", &n, &n, &n, &one, z, &n, dpp, &n, &zero, work, &n FCONE FCONE); for (i = 0; i < nsqr; i++) dpp[i] = work[i] + m1pj * padec88[j] * z[i]; m1pj *= -1; /* (-1)^j */ } /* power 0 */ for (i = 0; i < nsqr; i++) dpp[i] *= -1.0; for (j = 0; j < n; j++) { npp[j * np1] += 1.0; dpp[j * np1] += 1.0; } /* Pade approximation is (dpp)^-1 * npp. 
*/ F77_CALL(dgetrf) (&n, &n, dpp, &n, pivot, &info); if (info) error(_("LAPACK routine dgetrf returned info code %d"), info); F77_CALL(dgetrs) ("N", &n, &n, dpp, &n, pivot, npp, &n, &info FCONE); if (info) error(_("LAPACK routine dgetrs returned info code %d"), info); Memcpy(z, npp, nsqr); /* Now undo all of the preconditioning */ /* Preconditioning 3: square the result for every power of 2 */ while (sqrpowscal--) { F77_CALL(dgemm)("N", "N", &n, &n, &n, &one, z, &n, z, &n, &zero, work, &n FCONE FCONE); Memcpy(z, work, nsqr); } /* Preconditioning 2: apply inverse scaling */ for (j = 0; j < n; j++) for (i = 0; i < n; i++) z[i + j * n] *= scale[i]/scale[j]; /* Inverse permuation if x is not upper triangular and 'perm' * is not the identity permutation */ if ((iloperm != 1 || ihiperm != n) && !is_uppertri) { /* balancing permutation vector */ for (i = 0; i < n; i++) invperm[i] = i; /* identity permutation */ /* leading permutations applied in forward order */ for (i = 0; i < (iloperm - 1); i++) { int permutedindex = (int) (perm[i]) - 1; int tmp = invperm[i]; invperm[i] = invperm[permutedindex]; invperm[permutedindex] = tmp; } /* trailing permutations applied in reverse order */ for (i = n - 1; i >= ihiperm; i--) { int permutedindex = (int) (perm[i]) - 1; int tmp = invperm[i]; invperm[i] = invperm[permutedindex]; invperm[permutedindex] = tmp; } /* construct inverse balancing permutation vector */ Memcpy(pivot, invperm, n); for (i = 0; i < n; i++) invperm[pivot[i]] = i; /* apply inverse permutation */ Memcpy(work, z, nsqr); for (j = 0; j < n; j++) for (i = 0; i < n; i++) z[i + j * n] = work[invperm[i] + invperm[j] * n]; } /* Preconditioning 1: Trace normalization */ if (trshift > 0) { double mult = exp(trshift); for (i = 0; i < nsqr; i++) z[i] *= mult; } } } /* Product x * exp(M) * y, where x is an (1 x n) vector, M is an (n x * n) matrix and y is an (n x 1) vector. Result z is a scalar. 
*/ double actuar_expmprod(double *x, double *M, double *y, int n) { char *transa = "N"; int p = 1; double one = 1.0, zero = 0.0, *tmp, *expM; tmp = (double *) R_alloc(n, sizeof(double)); /* intermediate vector */ expM = (double *) R_alloc(n * n, sizeof(double)); /* matrix exponential */ /* Compute exp(M) */ actuar_expm(M, n, expM); /* Product tmp := x * exp(M) * (Dimensions: 1 x n 1 x n n x n) */ F77_CALL(dgemm)(transa, transa, &p, &n, &n, &one, x, &p, expM, &n, &zero, tmp, &p FCONE FCONE); /* Product z := tmp * y * (Dimensions: 1 x 1 1 x n n x 1) */ return F77_CALL(ddot)(&n, tmp, &p, y, &p); } /* Solution of a real system of linear equations AX = B, where A is an * (n x n) matrix and B is an (n x p) matrix. Essentially a simple * interface to the LAPACK routine DGESV based on modLa_dgesv() in * modules/lapack/laphack.c of R sources. Very little error checking * (e.g. no check that A is square) since it is currently used in a * very narrow and already controlled context. */ void actuar_solve(double *A, double *B, int n, int p, double *z) { int info, *ipiv; double *Avals; if (n == 0) error(_("'A' is 0-diml")); if (p == 0) error(_("no right-hand side in 'B'")); ipiv = (int *) R_alloc(n, sizeof(int)); /* Work on copies of A and B since they are overwritten by dgesv. */ Avals = (double *) R_alloc(n * n, sizeof(double)); Memcpy(Avals, A, (size_t) (n * n)); Memcpy(z, B, (size_t) (n * p)); F77_CALL(dgesv)(&n, &p, Avals, &n, ipiv, z, &n, &info); if (info < 0) error(_("argument %d of Lapack routine dgesv had invalid value"), -info); if (info > 0) error(_("Lapack routine dgesv: system is exactly singular")); } /* Power of a matrix x^k := x x ... x, where x in an (n x n) matrix * and k is an *integer* (including -1). This function is fairly naive * with little error checking since it is currently used in a very * narrow and already controlled context. 
*/ void actuar_matpow(double *x, int n, int k, double *z) { if (k == 0) { /* Return identity matrix */ int i, j; for (i = 0; i < n; i++) for (j = 0; j < n; j++) z[i * n + j] = (i == j) ? 1.0 : 0.0; } else { char *transa = "N"; double one = 1.0, zero = 0.0, *tmp, *xtmp; xtmp = (double *) R_alloc(n * n, sizeof(double)); /* If k is negative, invert matrix first. */ if (k < 0) { k = -k; /* Create identity matrix for use in actuar_solve() */ int i, j; double *y = (double *) R_alloc(n * n, sizeof(double)); for (i = 0; i < n; i++) for (j = 0; j < n; j++) y[i * n + j] = (i == j) ? 1.0 : 0.0; /* Inverse */ actuar_solve(x, y, n, n, xtmp); } else Memcpy(xtmp, x, (size_t) (n * n)); /* Take powers in multiples of 2 until there is only one * product left to make. That is, if k = 5, compute (x * x), * then ((x * x) * (x * x)) and finally ((x * x) * (x * x)) * * x. Idea taken from Octave in file .../src/xpow.cc. */ Memcpy(z, xtmp, (size_t) (n * n)); k--; tmp = (double *) R_alloc(n * n, sizeof(double)); while (k > 0) { if (k & 1) /* z = z * xtmp */ { F77_CALL(dgemm)(transa, transa, &n, &n, &n, &one, z, &n, xtmp, &n, &zero, tmp, &n FCONE FCONE); Memcpy(z, tmp, (size_t) (n * n)); } k >>= 1; /* efficient division by 2 */ if (k > 0) /* xtmp = xtmp * xtmp */ { F77_CALL(dgemm)(transa, transa, &n, &n, &n, &one, xtmp, &n, xtmp, &n, &zero, tmp, &n FCONE FCONE); Memcpy(xtmp, tmp, (size_t) (n * n)); } } } } /* Simple function to sample one value from a discrete distribution on * 0, 1, ..., n - 1, n using probabilities p[0], ..., p[n - 1], 1 - * (p[0] + ... + p[n - 1]). 
*/ int SampleSingleValue(int n, double *p) { int i; double pcum = p[0], u = unif_rand(); for (i = 0; u > pcum && i < n; i++) if (i < n - 1) pcum += p[i + 1]; return i; } actuar/src/logarithmic.c0000644000176200001440000001044015151206331014762 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, and to simulate random variates for the logarithmic * discrete distribution. See ../R/Logarithmic.R for details. * * We work with the probability mass function expressed as * * a * p^x / x, x = 1, 2, ... * * with a = -1/log(1 - p). * * AUTHOR: Vincent Goulet */ #include #include #include /* for R_CheckUserInterrupt() */ #include "locale.h" #include "dpq.h" double dlogarithmic(double x, double prob, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob)) return x + prob; #endif if (prob < 0 || prob >= 1) return R_NaN; ACT_D_nonint_check(x); if (!R_FINITE(x) || x < 1) return ACT_D__0; /* limiting case as prob -> 0 is point mass at one */ if (prob == 0) return (x == 1) ? ACT_D__1 : ACT_D__0; x = ACT_forceint(x); double a = -1.0/log1p(-prob); return ACT_D_exp(log(a) + x * log(prob) - log(x)); } /* For plogarithmic(), there does not seem to be algorithms much more * elaborate that successive computations of the probabilities using * the recurrence relationship * * P[X = x + 1] = p * x * Pr[X = x] / (x + 1), x = 2, 3, ... * * with Pr[X = 1] = -p/log(1 - p). This is what is done here. */ double plogarithmic(double q, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(prob)) return q + prob; #endif if (prob < 0 || prob >= 1) return R_NaN; if (q < 1) return ACT_DT_0; if (!R_FINITE(q)) return ACT_DT_1; /* limiting case as prob -> 0 is point mass at one. */ if (prob == 0) return (q >= 1) ? 
ACT_DT_1 : ACT_DT_0; int k; double s, pk; pk = -prob/log1p(-prob); /* Pr[X = 1] */ s = pk; if (q == 1) return ACT_DT_val(s); /* simple case */ for (k = 1; k < q; k++) { pk *= prob * k/(k + 1.0); s += pk; } return ACT_DT_val(s); } /* For qlogarithmic() we mostly reuse the code from qnbinom() et al. * of R sources. From src/nmath/qnbinom.c: * * METHOD * * Uses the Cornish-Fisher Expansion to include a skewness * correction to a normal approximation. This gives an * initial value which never seems to be off by more than * 1 or 2. A search is then conducted of values close to * this initial start point. */ #define _thisDIST_ logarithmic #define _dist_PARS_DECL_ double prob #define _dist_PARS_ prob #include "qDiscrete_search.h" /* do_search() et al. */ double qlogarithmic(double p, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(prob)) return p + prob; #endif if (prob < 0 || prob >= 1) return R_NaN; ACT_Q_P01_check(p); /* limiting case as prob -> 0 is point mass at one */ if (prob == 0) return 1.0; if (p == ACT_DT_0) return 1.0; if (p == ACT_DT_1) return R_PosInf; double a = -1.0/log1p(-prob), P = a * prob, Q = 1.0/(0.5 - prob + 0.5), mu = P * Q, sigma = sqrt(mu * (Q - mu)), gamma = (P * (1 + prob - P*(3 + 2*P)) * R_pow_di(Q, 3))/R_pow_di(sigma, 3); q_DISCRETE_DECL; q_DISCRETE_BODY(); } /* rlogarithmic() is an implementation with automatic selection of * the LS and LK algorithms of: * * Kemp, A. W. (1981), Efficient Generation of Logarithmically * Distributed Pseudo-Random Variables, Journal of the Royal * Statistical Society, Series C. Vol. 30, p. 249-253. * URL http://www.jstor.org/stable/2346348 * * The algorithms are also discussed in chapter 10 of Devroye (1986). */ double rlogarithmic(double prob) { if (prob < 0 || prob > 1) return R_NaN; /* limiting case as prob -> 0 is point mass at one. 
*/ if (prob == 0) return 1.0; /* Automatic selection between the LS and LK algorithms */ if (prob < 0.95) { double s = -prob/log1p(-prob); double x = 1.0; double u = unif_rand(); while (u > s) { u -= s; x += 1.0; s *= prob * (x - 1.0)/x; } return x; } /* else (prob >= 0.95) */ { double r = log1p(-prob); double v = unif_rand(); if (v >= prob) return 1.0; double u = unif_rand(); double q = -expm1(r * u); if (v <= (q * q)) return floor(1.0 + log(v)/log(q)); if (v <= q) return 2.0; /* case q^2 < v <= q */ return 1.0; /* case v > q */ } } actuar/src/init.c0000644000176200001440000003753615147745722013463 0ustar liggesusers/* * Native routines registration, as per "Writing R extensions" and * definition of native interface to one routine exported by package * expint. * */ #include #include #include #include /* optional; for expressiveness */ #include "actuar.h" static const R_ExternalMethodDef ExternalEntries[] = { {"actuar_do_random", (DL_FUNC) &actuar_do_random, -1}, {"actuar_do_randomphtype", (DL_FUNC) &actuar_do_randomphtype, -1}, {"actuar_do_dpq", (DL_FUNC) &actuar_do_dpq, -1}, {"actuar_do_dpqphtype", (DL_FUNC) &actuar_do_dpqphtype, -1}, {"actuar_do_betaint", (DL_FUNC) &actuar_do_betaint, -1}, {"actuar_do_hierarc", (DL_FUNC) &actuar_do_hierarc, -1}, {"actuar_do_panjer", (DL_FUNC) &actuar_do_panjer, -1}, {NULL, NULL, 0} }; void attribute_visible R_init_actuar(DllInfo *dll) { R_registerRoutines(dll, NULL, NULL, NULL, ExternalEntries); R_useDynamicSymbols(dll, FALSE); R_forceSymbols(dll, TRUE); /* Native interface to routine from package expint */ actuar_gamma_inc = (double(*)(double,double)) R_GetCCallable("expint", "gamma_inc"); /* Registration of actuar exported functions */ /* one parameter distributions */ R_RegisterCCallable("actuar", "mexp", (DL_FUNC) mexp); R_RegisterCCallable("actuar", "levexp", (DL_FUNC) levexp); R_RegisterCCallable("actuar", "mgfexp", (DL_FUNC) mgfexp); R_RegisterCCallable("actuar", "dinvexp", (DL_FUNC) dinvexp); 
R_RegisterCCallable("actuar", "pinvexp", (DL_FUNC) pinvexp); R_RegisterCCallable("actuar", "qinvexp", (DL_FUNC) qinvexp); R_RegisterCCallable("actuar", "rinvexp", (DL_FUNC) rinvexp); R_RegisterCCallable("actuar", "minvexp", (DL_FUNC) minvexp); R_RegisterCCallable("actuar", "levinvexp", (DL_FUNC) levinvexp); R_RegisterCCallable("actuar", "dlogarithmic", (DL_FUNC) dlogarithmic); R_RegisterCCallable("actuar", "plogarithmic", (DL_FUNC) plogarithmic); R_RegisterCCallable("actuar", "qlogarithmic", (DL_FUNC) qlogarithmic); R_RegisterCCallable("actuar", "rlogarithmic", (DL_FUNC) rlogarithmic); R_RegisterCCallable("actuar", "dztpois", (DL_FUNC) dztpois); R_RegisterCCallable("actuar", "pztpois", (DL_FUNC) pztpois); R_RegisterCCallable("actuar", "qztpois", (DL_FUNC) qztpois); R_RegisterCCallable("actuar", "rztpois", (DL_FUNC) rztpois); R_RegisterCCallable("actuar", "dztgeom", (DL_FUNC) dztgeom); R_RegisterCCallable("actuar", "pztgeom", (DL_FUNC) pztgeom); R_RegisterCCallable("actuar", "qztgeom", (DL_FUNC) qztgeom); R_RegisterCCallable("actuar", "rztgeom", (DL_FUNC) rztgeom); /* two parameter distributions */ R_RegisterCCallable("actuar", "munif", (DL_FUNC) munif); R_RegisterCCallable("actuar", "levunif", (DL_FUNC) levunif); R_RegisterCCallable("actuar", "mgfunif", (DL_FUNC) mgfunif); R_RegisterCCallable("actuar", "mnorm", (DL_FUNC) mnorm); R_RegisterCCallable("actuar", "mgfnorm", (DL_FUNC) mgfnorm); R_RegisterCCallable("actuar", "mbeta", (DL_FUNC) mbeta); R_RegisterCCallable("actuar", "levbeta", (DL_FUNC) levbeta); R_RegisterCCallable("actuar", "mgamma", (DL_FUNC) mgamma); R_RegisterCCallable("actuar", "levgamma", (DL_FUNC) levgamma); R_RegisterCCallable("actuar", "mgfgamma", (DL_FUNC) mgfgamma); R_RegisterCCallable("actuar", "mchisq", (DL_FUNC) mchisq); R_RegisterCCallable("actuar", "levchisq", (DL_FUNC) levchisq); R_RegisterCCallable("actuar", "mgfchisq", (DL_FUNC) mgfchisq); R_RegisterCCallable("actuar", "dinvgamma", (DL_FUNC) dinvgamma); R_RegisterCCallable("actuar", 
"pinvgamma", (DL_FUNC) pinvgamma); R_RegisterCCallable("actuar", "qinvgamma", (DL_FUNC) qinvgamma); R_RegisterCCallable("actuar", "rinvgamma", (DL_FUNC) rinvgamma); R_RegisterCCallable("actuar", "minvgamma", (DL_FUNC) minvgamma); R_RegisterCCallable("actuar", "levinvgamma", (DL_FUNC) levinvgamma); R_RegisterCCallable("actuar", "mgfinvgamma", (DL_FUNC) mgfinvgamma); R_RegisterCCallable("actuar", "dinvparalogis", (DL_FUNC) dinvparalogis); R_RegisterCCallable("actuar", "pinvparalogis", (DL_FUNC) pinvparalogis); R_RegisterCCallable("actuar", "qinvparalogis", (DL_FUNC) qinvparalogis); R_RegisterCCallable("actuar", "rinvparalogis", (DL_FUNC) rinvparalogis); R_RegisterCCallable("actuar", "minvparalogis", (DL_FUNC) minvparalogis); R_RegisterCCallable("actuar", "levinvparalogis", (DL_FUNC) levinvparalogis); R_RegisterCCallable("actuar", "dinvpareto", (DL_FUNC) dinvpareto); R_RegisterCCallable("actuar", "pinvpareto", (DL_FUNC) pinvpareto); R_RegisterCCallable("actuar", "qinvpareto", (DL_FUNC) qinvpareto); R_RegisterCCallable("actuar", "rinvpareto", (DL_FUNC) rinvpareto); R_RegisterCCallable("actuar", "minvpareto", (DL_FUNC) minvpareto); R_RegisterCCallable("actuar", "levinvpareto", (DL_FUNC) levinvpareto); R_RegisterCCallable("actuar", "dinvweibull", (DL_FUNC) dinvweibull); R_RegisterCCallable("actuar", "pinvweibull", (DL_FUNC) pinvweibull); R_RegisterCCallable("actuar", "qinvweibull", (DL_FUNC) qinvweibull); R_RegisterCCallable("actuar", "rinvweibull", (DL_FUNC) rinvweibull); R_RegisterCCallable("actuar", "minvweibull", (DL_FUNC) minvweibull); R_RegisterCCallable("actuar", "levinvweibull", (DL_FUNC) levinvweibull); R_RegisterCCallable("actuar", "dlgamma", (DL_FUNC) dlgamma); R_RegisterCCallable("actuar", "plgamma", (DL_FUNC) plgamma); R_RegisterCCallable("actuar", "qlgamma", (DL_FUNC) qlgamma); R_RegisterCCallable("actuar", "rlgamma", (DL_FUNC) rlgamma); R_RegisterCCallable("actuar", "mlgamma", (DL_FUNC) mlgamma); R_RegisterCCallable("actuar", "levlgamma", (DL_FUNC) 
levlgamma); R_RegisterCCallable("actuar", "dllogis", (DL_FUNC) dllogis); R_RegisterCCallable("actuar", "pllogis", (DL_FUNC) pllogis); R_RegisterCCallable("actuar", "qllogis", (DL_FUNC) qllogis); R_RegisterCCallable("actuar", "rllogis", (DL_FUNC) rllogis); R_RegisterCCallable("actuar", "mllogis", (DL_FUNC) mllogis); R_RegisterCCallable("actuar", "levllogis", (DL_FUNC) levllogis); R_RegisterCCallable("actuar", "mlnorm", (DL_FUNC) mlnorm); R_RegisterCCallable("actuar", "levlnorm", (DL_FUNC) levlnorm); R_RegisterCCallable("actuar", "dparalogis", (DL_FUNC) dparalogis); R_RegisterCCallable("actuar", "pparalogis", (DL_FUNC) pparalogis); R_RegisterCCallable("actuar", "qparalogis", (DL_FUNC) qparalogis); R_RegisterCCallable("actuar", "rparalogis", (DL_FUNC) rparalogis); R_RegisterCCallable("actuar", "mparalogis", (DL_FUNC) mparalogis); R_RegisterCCallable("actuar", "levparalogis", (DL_FUNC) levparalogis); R_RegisterCCallable("actuar", "dpareto", (DL_FUNC) dpareto); R_RegisterCCallable("actuar", "ppareto", (DL_FUNC) ppareto); R_RegisterCCallable("actuar", "qpareto", (DL_FUNC) qpareto); R_RegisterCCallable("actuar", "rpareto", (DL_FUNC) rpareto); R_RegisterCCallable("actuar", "mpareto", (DL_FUNC) mpareto); R_RegisterCCallable("actuar", "levpareto", (DL_FUNC) levpareto); R_RegisterCCallable("actuar", "dpareto1", (DL_FUNC) dpareto1); R_RegisterCCallable("actuar", "ppareto1", (DL_FUNC) ppareto1); R_RegisterCCallable("actuar", "qpareto1", (DL_FUNC) qpareto1); R_RegisterCCallable("actuar", "rpareto1", (DL_FUNC) rpareto1); R_RegisterCCallable("actuar", "mpareto1", (DL_FUNC) mpareto1); R_RegisterCCallable("actuar", "levpareto1", (DL_FUNC) levpareto1); R_RegisterCCallable("actuar", "mweibull", (DL_FUNC) mweibull); R_RegisterCCallable("actuar", "levweibull", (DL_FUNC) levweibull); /* R_RegisterCCallable("actuar", "minvGauss", (DL_FUNC) minvGauss); [defunct v3.0-0] */ /* R_RegisterCCallable("actuar", "levinvGauss", (DL_FUNC) levinvGauss); [idem] */ /* R_RegisterCCallable("actuar", 
"mgfinvGauss", (DL_FUNC) mgfinvGauss); [idem] */ R_RegisterCCallable("actuar", "dgumbel", (DL_FUNC) dgumbel); R_RegisterCCallable("actuar", "pgumbel", (DL_FUNC) pgumbel); R_RegisterCCallable("actuar", "qgumbel", (DL_FUNC) qgumbel); R_RegisterCCallable("actuar", "rgumbel", (DL_FUNC) rgumbel); R_RegisterCCallable("actuar", "mgumbel", (DL_FUNC) mgumbel); R_RegisterCCallable("actuar", "mgfgumbel", (DL_FUNC) mgfgumbel); R_RegisterCCallable("actuar", "dinvgauss", (DL_FUNC) dinvgauss); R_RegisterCCallable("actuar", "pinvgauss", (DL_FUNC) pinvgauss); R_RegisterCCallable("actuar", "qinvgauss", (DL_FUNC) qinvgauss); R_RegisterCCallable("actuar", "rinvgauss", (DL_FUNC) rinvgauss); R_RegisterCCallable("actuar", "minvgauss", (DL_FUNC) minvgauss); R_RegisterCCallable("actuar", "levinvgauss", (DL_FUNC) levinvgauss); R_RegisterCCallable("actuar", "mgfinvgauss", (DL_FUNC) mgfinvgauss); R_RegisterCCallable("actuar", "dztnbinom", (DL_FUNC) dztnbinom); R_RegisterCCallable("actuar", "pztnbinom", (DL_FUNC) pztnbinom); R_RegisterCCallable("actuar", "qztnbinom", (DL_FUNC) qztnbinom); R_RegisterCCallable("actuar", "rztnbinom", (DL_FUNC) rztnbinom); R_RegisterCCallable("actuar", "dztbinom", (DL_FUNC) dztbinom); R_RegisterCCallable("actuar", "pztbinom", (DL_FUNC) pztbinom); R_RegisterCCallable("actuar", "qztbinom", (DL_FUNC) qztbinom); R_RegisterCCallable("actuar", "rztbinom", (DL_FUNC) rztbinom); R_RegisterCCallable("actuar", "dzmlogarithmic", (DL_FUNC) dzmlogarithmic); R_RegisterCCallable("actuar", "pzmlogarithmic", (DL_FUNC) pzmlogarithmic); R_RegisterCCallable("actuar", "qzmlogarithmic", (DL_FUNC) qzmlogarithmic); R_RegisterCCallable("actuar", "rzmlogarithmic", (DL_FUNC) rzmlogarithmic); R_RegisterCCallable("actuar", "dzmpois", (DL_FUNC) dzmpois); R_RegisterCCallable("actuar", "pzmpois", (DL_FUNC) pzmpois); R_RegisterCCallable("actuar", "qzmpois", (DL_FUNC) qzmpois); R_RegisterCCallable("actuar", "rzmpois", (DL_FUNC) rzmpois); R_RegisterCCallable("actuar", "dzmgeom", (DL_FUNC) dzmgeom); 
R_RegisterCCallable("actuar", "pzmgeom", (DL_FUNC) pzmgeom); R_RegisterCCallable("actuar", "qzmgeom", (DL_FUNC) qzmgeom); R_RegisterCCallable("actuar", "rzmgeom", (DL_FUNC) rzmgeom); R_RegisterCCallable("actuar", "dpoisinvgauss", (DL_FUNC) dpoisinvgauss); R_RegisterCCallable("actuar", "ppoisinvgauss", (DL_FUNC) ppoisinvgauss); R_RegisterCCallable("actuar", "qpoisinvgauss", (DL_FUNC) qpoisinvgauss); R_RegisterCCallable("actuar", "rpoisinvgauss", (DL_FUNC) rpoisinvgauss); /* three parameter distributions */ R_RegisterCCallable("actuar", "dburr", (DL_FUNC) dburr); R_RegisterCCallable("actuar", "pburr", (DL_FUNC) pburr); R_RegisterCCallable("actuar", "qburr", (DL_FUNC) qburr); R_RegisterCCallable("actuar", "rburr", (DL_FUNC) rburr); R_RegisterCCallable("actuar", "mburr", (DL_FUNC) mburr); R_RegisterCCallable("actuar", "levburr", (DL_FUNC) levburr); R_RegisterCCallable("actuar", "dgenpareto", (DL_FUNC) dgenpareto); R_RegisterCCallable("actuar", "pgenpareto", (DL_FUNC) pgenpareto); R_RegisterCCallable("actuar", "qgenpareto", (DL_FUNC) qgenpareto); R_RegisterCCallable("actuar", "rgenpareto", (DL_FUNC) rgenpareto); R_RegisterCCallable("actuar", "mgenpareto", (DL_FUNC) mgenpareto); R_RegisterCCallable("actuar", "levgenpareto", (DL_FUNC) levgenpareto); R_RegisterCCallable("actuar", "dinvburr", (DL_FUNC) dinvburr); R_RegisterCCallable("actuar", "pinvburr", (DL_FUNC) pinvburr); R_RegisterCCallable("actuar", "qinvburr", (DL_FUNC) qinvburr); R_RegisterCCallable("actuar", "rinvburr", (DL_FUNC) rinvburr); R_RegisterCCallable("actuar", "minvburr", (DL_FUNC) minvburr); R_RegisterCCallable("actuar", "levinvburr", (DL_FUNC) levinvburr); R_RegisterCCallable("actuar", "dinvtrgamma", (DL_FUNC) dinvtrgamma); R_RegisterCCallable("actuar", "pinvtrgamma", (DL_FUNC) pinvtrgamma); R_RegisterCCallable("actuar", "qinvtrgamma", (DL_FUNC) qinvtrgamma); R_RegisterCCallable("actuar", "rinvtrgamma", (DL_FUNC) rinvtrgamma); R_RegisterCCallable("actuar", "minvtrgamma", (DL_FUNC) minvtrgamma); 
R_RegisterCCallable("actuar", "levinvtrgamma", (DL_FUNC) levinvtrgamma); R_RegisterCCallable("actuar", "dtrgamma", (DL_FUNC) dtrgamma); R_RegisterCCallable("actuar", "ptrgamma", (DL_FUNC) ptrgamma); R_RegisterCCallable("actuar", "qtrgamma", (DL_FUNC) qtrgamma); R_RegisterCCallable("actuar", "rtrgamma", (DL_FUNC) rtrgamma); R_RegisterCCallable("actuar", "mtrgamma", (DL_FUNC) mtrgamma); R_RegisterCCallable("actuar", "levtrgamma", (DL_FUNC) levtrgamma); R_RegisterCCallable("actuar", "dpareto2", (DL_FUNC) dpareto2); R_RegisterCCallable("actuar", "ppareto2", (DL_FUNC) ppareto2); R_RegisterCCallable("actuar", "qpareto2", (DL_FUNC) qpareto2); R_RegisterCCallable("actuar", "rpareto2", (DL_FUNC) rpareto2); R_RegisterCCallable("actuar", "mpareto2", (DL_FUNC) mpareto2); R_RegisterCCallable("actuar", "levpareto2", (DL_FUNC) levpareto2); R_RegisterCCallable("actuar", "dpareto3", (DL_FUNC) dpareto3); R_RegisterCCallable("actuar", "ppareto3", (DL_FUNC) ppareto3); R_RegisterCCallable("actuar", "qpareto3", (DL_FUNC) qpareto3); R_RegisterCCallable("actuar", "rpareto3", (DL_FUNC) rpareto3); R_RegisterCCallable("actuar", "mpareto3", (DL_FUNC) mpareto3); R_RegisterCCallable("actuar", "levpareto3", (DL_FUNC) levpareto3); R_RegisterCCallable("actuar", "dzmnbinom", (DL_FUNC) dzmnbinom); R_RegisterCCallable("actuar", "pzmnbinom", (DL_FUNC) pzmnbinom); R_RegisterCCallable("actuar", "qzmnbinom", (DL_FUNC) qzmnbinom); R_RegisterCCallable("actuar", "rzmnbinom", (DL_FUNC) rzmnbinom); R_RegisterCCallable("actuar", "dzmbinom", (DL_FUNC) dzmbinom); R_RegisterCCallable("actuar", "pzmbinom", (DL_FUNC) pzmbinom); R_RegisterCCallable("actuar", "qzmbinom", (DL_FUNC) qzmbinom); R_RegisterCCallable("actuar", "rzmbinom", (DL_FUNC) rzmbinom); /* four parameter distributions */ R_RegisterCCallable("actuar", "dtrbeta", (DL_FUNC) dtrbeta); R_RegisterCCallable("actuar", "ptrbeta", (DL_FUNC) ptrbeta); R_RegisterCCallable("actuar", "qtrbeta", (DL_FUNC) qtrbeta); R_RegisterCCallable("actuar", "rtrbeta", (DL_FUNC) 
rtrbeta); R_RegisterCCallable("actuar", "mtrbeta", (DL_FUNC) mtrbeta); R_RegisterCCallable("actuar", "levtrbeta", (DL_FUNC) levtrbeta); R_RegisterCCallable("actuar", "dgenbeta", (DL_FUNC) dgenbeta); R_RegisterCCallable("actuar", "pgenbeta", (DL_FUNC) pgenbeta); R_RegisterCCallable("actuar", "qgenbeta", (DL_FUNC) qgenbeta); R_RegisterCCallable("actuar", "rgenbeta", (DL_FUNC) rgenbeta); R_RegisterCCallable("actuar", "mgenbeta", (DL_FUNC) mgenbeta); R_RegisterCCallable("actuar", "levgenbeta", (DL_FUNC) levgenbeta); R_RegisterCCallable("actuar", "dpareto4", (DL_FUNC) dpareto4); R_RegisterCCallable("actuar", "ppareto4", (DL_FUNC) ppareto4); R_RegisterCCallable("actuar", "qpareto4", (DL_FUNC) qpareto4); R_RegisterCCallable("actuar", "rpareto4", (DL_FUNC) rpareto4); R_RegisterCCallable("actuar", "mpareto4", (DL_FUNC) mpareto4); R_RegisterCCallable("actuar", "levpareto4", (DL_FUNC) levpareto4); /* five parameter distributions */ R_RegisterCCallable("actuar", "dfpareto", (DL_FUNC) dfpareto); R_RegisterCCallable("actuar", "pfpareto", (DL_FUNC) pfpareto); R_RegisterCCallable("actuar", "qfpareto", (DL_FUNC) qfpareto); R_RegisterCCallable("actuar", "rfpareto", (DL_FUNC) rfpareto); R_RegisterCCallable("actuar", "mfpareto", (DL_FUNC) mfpareto); R_RegisterCCallable("actuar", "levfpareto", (DL_FUNC) levfpareto); /* phase-type distributions */ R_RegisterCCallable("actuar", "dphtype", (DL_FUNC) dphtype); R_RegisterCCallable("actuar", "pphtype", (DL_FUNC) pphtype); R_RegisterCCallable("actuar", "rphtype", (DL_FUNC) rphtype); R_RegisterCCallable("actuar", "mphtype", (DL_FUNC) mphtype); R_RegisterCCallable("actuar", "mgfphtype", (DL_FUNC) mgfphtype); /* special integrals */ R_RegisterCCallable("actuar", "betaint", (DL_FUNC) betaint); } /* Define imports from package expint */ double(*actuar_gamma_inc)(double,double); actuar/src/exp.c0000644000176200001440000000307015147745722013276 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to calculate 
raw and limited moments for the Exponential * distribution. See ../R/ExponentialSupp.R for details. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mexp(double order, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(scale)) return order + scale; #endif if (!R_FINITE(scale) || !R_FINITE(order) || scale <= 0.0) return R_NaN; if (order <= -1.0) return R_PosInf; return R_pow(scale, order) * gammafn(1.0 + order); } double levexp(double limit, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(scale) || ISNAN(order)) return limit + scale + order; #endif if (!R_FINITE(scale) || !R_FINITE(order) || scale <= 0.0) return R_NaN; if (order <= -1.0) return R_PosInf; if (limit <= 0.0) return 0.0; double u, tmp; tmp = 1.0 + order; u = exp(log(limit) - log(scale)); return R_pow(scale, order) * gammafn(tmp) * pgamma(u, tmp, 1.0, 1, 0) + ACT_DLIM__0(limit, order) * exp(-u); } double mgfexp(double t, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(scale)) return t + scale; #endif if (!R_FINITE(scale) || scale <= 0.0 || scale * t > 1.0) return R_NaN; if (t == 0.0) return ACT_D__1; return ACT_D_exp(-log1p(-scale * t)); } actuar/src/invgamma.c0000644000176200001440000000750415147745722014307 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Inverse Gamma distribution. See ../R/InverseGamma.R for * details. * * We work with the density expressed as * * u^shape * e^(-u) / (x * gamma(shape)) * * with u = scale/x. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvgamma(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale < 0.0) return R_NaN; /* handle also x == 0 here */ if (!R_FINITE(x) || x <= 0.0) return ACT_D__0; double logu = log(scale) - log(x); return ACT_D_exp(shape * logu - exp(logu) - log(x) - lgammafn(shape)); } double pinvgamma(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale < 0.0) return R_NaN;; if (q <= 0) return ACT_DT_0; double u = exp(log(scale) - log(q)); return pgamma(u, shape, 1.0, !lower_tail, log_p); } double qinvgamma(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale / qgamma(p, shape, 1.0, !lower_tail, 0); } double rinvgamma(double shape, double scale) { if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN;; return scale / rgamma(shape, 1.0); } double minvgamma(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order >= shape) return R_PosInf; return R_pow(scale, order) * gammafn(shape - order) / gammafn(shape); } double levinvgamma(double limit, double shape, double scale, double order, int give_log) { #ifdef 
IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order >= shape) return R_PosInf; if (limit <= 0.0) return 0.0; double u = exp(log(scale) - log(limit)); return R_pow(scale, order) * actuar_gamma_inc(shape - order, u) / gammafn(shape) + ACT_DLIM__0(limit, order) * pgamma(u, shape, 1.0, 1, 0); } double mgfinvgamma(double t, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(shape) || ISNAN(scale)) return t + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0 || t > 0.0 ) return R_NaN; if (t == 0.0) return ACT_D__1; /* rescale and change sign */ t = -scale * t; return ACT_D_exp(M_LN2 + 0.5 * shape * log(t) + log(bessel_k(sqrt(4 * t), shape, 1)) - lgammafn(shape)); } actuar/src/paralogis.c0000644000176200001440000000746015147745722014472 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the paralogistic distribution. See ../R/Paralogistic.R for * details. * * We work with the density expressed as * * shape^2 * u^shape * (1 - u) / x * * with u = 1/(1 + v), v = (x/scale)^shape. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dparalogis(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape < 1) return R_PosInf; if (shape > 1) return ACT_D__0; /* else */ return ACT_D_val(1.0/scale); } double logv, logu, log1mu; logv = shape * (log(x) - log(scale)); logu = - log1pexp(logv); log1mu = - log1pexp(-logv); return ACT_D_exp(2.0 * log(shape) + shape * logu + log1mu - log(x)); } double pparalogis(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(shape * (log(q) - log(scale)))); return ACT_DT_Cval(R_pow(u, shape)); } double qparalogis(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); double tmp = 1.0/shape; return scale * R_pow(R_pow(ACT_D_Cval(p), -tmp) - 1.0, tmp); } double rparalogis(double shape, double scale) { double tmp; if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; tmp = 1.0/shape; return scale * R_pow(R_pow(unif_rand(), -tmp) - 1.0, tmp); } double mparalogis(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) 
|| shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape || order >= shape * shape) return R_PosInf; double tmp = order / shape; return R_pow(scale, order) * gammafn(1.0 + tmp) * gammafn(shape - tmp) / gammafn(shape); } double levparalogis(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape) return R_PosInf; if (limit <= 0.0) return 0.0; double logv, u, u1m; double tmp = order / shape; logv = shape * (log(limit) - log(scale)); u = exp(-log1pexp(logv)); u1m = exp(-log1pexp(-logv)); return R_pow(scale, order) * betaint_raw(u1m, 1.0 + tmp, shape - tmp, u) / gammafn(shape) + ACT_DLIM__0(limit, order) * R_pow(u, shape); } actuar/src/Makevars0000644000176200001440000000026215147745722014032 0ustar liggesusers## We use the BLAS and the LAPACK libraries PKG_LIBS = $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) ## Hide entry points (but for R_init_actuar in init.c) PKG_CFLAGS = $(C_VISIBILITY) actuar/src/qDiscrete_search.h0000644000176200001440000001127515151206331015744 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Find quantiles for discrete distributions using the Cornish-Fisher * Expansion. * * This file is a copy of src/nmath/qDiscrete_search.h of R sources, * but with the following minor changes: * * 1. all debugging material is deleted; * 2. the macro function q_DISCRETE_01_CHECKS(), which does not serve * any purpose without the debugging material is deleted; * 3. the declaration of variables in q_DISCRETE_BODY() is moved into * a separate macro; see comments marked 'VG' below. 
* * AUTHOR: Vincent Goulet * based on code from the R Core Team */ /* This is #included from ./logarithmic.c and ./poisinvgauss.c */ #define PST_0(a, b) a ## b #define PASTE(a, b) PST_0(a, b) #define _pDIST_ PASTE(p, _thisDIST_) #define _qDIST_ PASTE(q, _thisDIST_) #ifdef MATHLIB_STANDALONE # define MAYBE_R_CheckUserInterrupt() #else # define MAYBE_R_CheckUserInterrupt() R_CheckUserInterrupt() #endif #define DO_SEARCH_FUN(...) \ do_search(double y, double *z, double p, __VA_ARGS__, \ double incr, int lower_tail, int log_p) #define DO_SEARCH_(Y_, incr_, ...) \ do_search(Y_, &z, p, __VA_ARGS__, incr_, lower_tail, log_p) #define P_DIST(Y_, ...) _pDIST_(Y_, __VA_ARGS__, lower_tail, log_p) static double DO_SEARCH_FUN(_dist_PARS_DECL_) { Rboolean left = (lower_tail ? (*z >= p) : (*z < p)); if(left) { // (lower_tail, *z >= p) or (upper tail, *z < p): search to the __left__ for(int iter = 0; ; iter++) { double newz = -1.; // -Wall #ifndef MATHLIB_STANDALONE if(iter % 10000 == 0) R_CheckUserInterrupt();// have seen inf.loops #endif if(y > 0) newz = P_DIST(y - incr, _dist_PARS_); else if(y < 0) y = 0; // note that newz may be NaN because of remaining border line bugs in _pDIST_() {bug from pbeta()} if(y == 0 || ISNAN(newz) || (lower_tail ? (newz < p) : (newz >= p))) { return y; // and previous *z } y = fmax2(0, y - incr); *z = newz; } } else { // (lower_tail, *z < p) or (upper tail, *z >= p): search to the __right__ for(int iter = 0; ; iter++) { #ifndef MATHLIB_STANDALONE if(iter % 10000 == 0) R_CheckUserInterrupt(); #endif y += incr; *z = P_DIST(y, _dist_PARS_); if(ISNAN(*z) || (lower_tail ? (*z >= p) : (*z < p))) { return y; } } } } // do_search() #define q_DISCR_CHECK_BOUNDARY(Y_) if(Y_ < 0) Y_ = 0. /* VG: the Poisson-inverse gaussian requires different declarations * for a limiting case. Therefore, the standard declaration is taken * out of the q_DISCRETE_BODY() macro found in R sources. 
*/ #define q_DISCRETE_DECL \ double \ z = qnorm(p, 0., 1., lower_tail, log_p), \ y = ACT_forceint(mu + sigma * (z + gamma * (z*z - 1) / 6)) #define q_DISCRETE_BODY() do { \ q_DISCR_CHECK_BOUNDARY(y); \ \ z = P_DIST(y, _dist_PARS_); \ \ /* Algorithmic "tuning parameters", used to be hardwired; changed for speed &| precision */ \ const double \ _pf_n_ = 8, /* was hardwired to 64 */ \ _pf_L_ = 2, /* was hardwired to 64 */ \ _yLarge_ = 4096, /* was hardwired to 1e5 */ \ _incF_ = (1./64),/* was hardwired to 0.001 (= 1./1000 ) */ \ _iShrink_ = 8, /* was hardwired to 100 */ \ _relTol_ = 1e-15,/* was hardwired to 1e-15 */ \ _xf_ = 4; /* extra factor, *must* be >= 1 (new anyway) */ \ \ /* fuzz to ensure left continuity: do not loose too much (=> error in upper tail) */ \ if(log_p) { /* <==> p \in [-Inf, 0] different adjustment: "other sign" */ \ double e = _pf_L_ * DBL_EPSILON; \ if(lower_tail && p > - DBL_MAX) /* prevent underflow to -Inf */ \ p *= 1 + e; \ else /* if(p < - DBL_MIN) // not too close to -0 */ \ p *= 1 - e; \ \ } else { /* not log scale */ \ double e = _pf_n_ * DBL_EPSILON; \ if(lower_tail) \ p *= 1 - e; \ else if(1 - p > _xf_*e) /* otherwise get p > 1 */ \ p *= 1 + e; \ } \ \ /* If the C-F value y is not too large a simple search is OK */ \ if(y < _yLarge_) return DO_SEARCH_(y, 1, _dist_PARS_); \ /* Otherwise be a bit cleverer in the search: use larger increments, notably initially: */ \ { /* y >= _yLarge_ */ \ double oldincr, incr = floor(y * _incF_); \ int qIt = 0; \ \ do { \ oldincr = incr; \ y = DO_SEARCH_(y, incr, _dist_PARS_); /* also updating *z */ \ if(++qIt % 10000 == 0) MAYBE_R_CheckUserInterrupt(); \ incr = fmax2(1, floor(incr / _iShrink_)); \ } while(oldincr > 1 && incr > y * _relTol_); \ return y; \ } \ } while(0) actuar/src/dpq.c0000644000176200001440000012061615147745722013274 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability density, cumulative probability * quantile 
functions and moment generating functions, raw moments * and limited moments for some probability laws not in base R (or * those quantities not provided in base R). Function .External() * calls actuar_do_dpq() with arguments: * * 1. the name of the distribution, with a "d", a "p" or "q" * prepended to it (e.g. "dpareto", "pburr"); * 2. the value(s) where the function is to be evaluated; * 3:x. the parameters of the distribution (including the order of * the limited moment for lev*); * x+1. whether to return the lower or upper tail probability or * quantile (p* and q* only); see note below for m* and lev* * functions; * x+2. whether to return probability in log scale or the cumulant * generating function (d*, p*, q* and mgf* only). * * Function actuar_do_dpq() will extract the name of the distribution, look * up in table dpq_tab defined in names.c which of actuar_do_dpq{1,2,3,4} * should take care of the calculation and dispatch to this function. * In turn, functions actuar_do_dpq{1,2,3,4} call function * {d,p,q,m,lev,mgf}dist() to get actual values from distribution * "dist". * * Note: the m* and lev* functions came later in the process. In * order to easily fit them into this system, I have decided to leave * an unused 'give_log' argument in the C definitions of these * functions. Otherwise, this would have required defining functions * dpq{1,2,3,4,5}_0() below. * * Functions therein are essentially identical to those found in * src/main/arithmetic.c of R sources [at the time or writing] with a * different naming scheme. * * To add a new distribution: write a {d,p,q,m,lev,mgf}dist() * function, add an entry in names.c and in the definition of the * corresponding actuar_do_dpq{1,2,3,4,6} function, declare the * function in actuar.h. * * Adapted from src/main/arithmetic.c of R sources. 
* * AUTHOR: Vincent Goulet * with much indirect help from the R Core Team */ #include #include #include "actuar.h" #include "locale.h" /* Prototypes of auxiliary functions */ static SEXP dpq1_1(SEXP, SEXP, SEXP, double (*f)(double, double, int)); static SEXP dpq1_2(SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, int, int)); static SEXP dpq2_1(SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, int)); static SEXP dpq2_2(SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, int, int)); static SEXP dpq2_5(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, int, int, double, int, int)); static SEXP dpq3_1(SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, int)); static SEXP dpq3_2(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, int, int)); static SEXP dpq4_1(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, double, int)); static SEXP dpq4_2(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, double, int, int)); static SEXP dpq5_1(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, double, double, int)); static SEXP dpq5_2(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, double, double, int, int)); static SEXP dpq6_1(SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double, double, double, double, double, double, int)); /* Additional access macros */ #define CAD5R(e) CAR(CDR(CDR(CDR(CDR(CDR(e)))))) #define CAD6R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(e))))))) #define CAD7R(e) CAR(CDR(CDR(CDR(CDR(CDR(CDR(CDR(e)))))))) /* Functions for one parameter distributions */ #define if_NA_dpq1_set(y, x, a) \ if (ISNA (x) || ISNA (a)) y = NA_REAL; \ else if (ISNAN(x) || ISNAN(a)) y = R_NaN; #define mod_iterate1(n1, n2, i1, i2) \ for (i = i1 = i2 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 
0 : i2, \ ++i) static SEXP dpq1_1(SEXP sx, SEXP sa, SEXP sI, double (*f)(double, double, int)) { SEXP sy; R_xlen_t i, ix, ia, n, nx, na; double xi, ai, *x, *a, *y; Rboolean naflag = FALSE; #define SETUP_DPQ1 \ if (!isNumeric(sx) || !isNumeric(sa)) \ error(_("invalid arguments")); \ \ nx = XLENGTH(sx); \ na = XLENGTH(sa); \ if ((nx == 0) || (na == 0)) \ return(allocVector(REALSXP, 0)); \ n = (nx < na) ? na : nx; \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = REAL(sx); \ a = REAL(sa); \ y = REAL(sy) SETUP_DPQ1; int i_1 = asInteger(sI); mod_iterate1(nx, na, ix, ia) { xi = x[ix]; ai = a[ia]; if_NA_dpq1_set(y[i], xi, ai) else { y[i] = f(xi, ai, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQ1 \ if (naflag) \ warning(R_MSG_NA); \ \ if (n == nx) \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ else if (n == na) \ SHALLOW_DUPLICATE_ATTRIB(sy, sa); \ UNPROTECT(3) FINISH_DPQ1; return sy; } static SEXP dpq1_2(SEXP sx, SEXP sa, SEXP sI, SEXP sJ, double (*f)(double, double, int, int)) { SEXP sy; R_xlen_t i, ix, ia, n, nx, na; double xi, ai, *x, *a, *y; Rboolean naflag = FALSE; SETUP_DPQ1; int i_1 = asInteger(sI), i_2 = asInteger(sJ); mod_iterate1(nx, na, ix, ia) { xi = x[ix]; ai = a[ia]; if_NA_dpq1_set(y[i], xi, ai) else { y[i] = f(xi, ai, i_1, i_2); if (ISNAN(y[i])) naflag = TRUE; } } FINISH_DPQ1; return sy; } #define DPQ1_1(A, FUN) dpq1_1(CAR(A), CADR(A), CADDR(A), FUN); #define DPQ1_2(A, FUN) dpq1_2(CAR(A), CADR(A), CADDR(A), CADDDR(A), FUN) SEXP actuar_do_dpq1(int code, SEXP args) { switch (code) { case 1: return DPQ1_1(args, mexp); case 2: return DPQ1_1(args, dinvexp); case 3: return DPQ1_2(args, pinvexp); case 4: return DPQ1_2(args, qinvexp); case 5: return DPQ1_1(args, minvexp); case 6: return DPQ1_1(args, mgfexp); case 101: return DPQ1_1(args, dlogarithmic); case 102: return DPQ1_2(args, plogarithmic); case 103: return DPQ1_2(args, qlogarithmic); case 104: return DPQ1_1(args, 
dztpois); case 105: return DPQ1_2(args, pztpois); case 106: return DPQ1_2(args, qztpois); case 107: return DPQ1_1(args, dztgeom); case 108: return DPQ1_2(args, pztgeom); case 109: return DPQ1_2(args, qztgeom); default: error(_("internal error in actuar_do_dpq1")); } return args; /* never used; to keep -Wall happy */ } /* Functions for two parameter distributions */ #define if_NA_dpq2_set(y, x, a, b) \ if (ISNA (x) || ISNA (a) || ISNA (b)) y = NA_REAL; \ else if (ISNAN(x) || ISNAN(a) || ISNAN(b)) y = R_NaN; #define mod_iterate2(n1, n2, n3, i1, i2, i3) \ for (i = i1 = i2 = i3 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 0 : i2, \ i3 = (++i3 == n3) ? 0 : i3, \ ++i) static SEXP dpq2_1(SEXP sx, SEXP sa, SEXP sb, SEXP sI, double (*f)(double, double, double, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, n, nx, na, nb; double xi, ai, bi, *x, *a, *b, *y; Rboolean naflag = FALSE; #define SETUP_DPQ2 \ if (!isNumeric(sx) || !isNumeric(sa) || !isNumeric(sb)) \ error(_("invalid arguments")); \ \ nx = XLENGTH(sx); \ na = XLENGTH(sa); \ nb = XLENGTH(sb); \ if ((nx == 0) || (na == 0) || (nb == 0)) \ return(allocVector(REALSXP, 0)); \ n = nx; \ if (n < na) n = na; \ if (n < nb) n = nb; \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sb = coerceVector(sb, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = REAL(sx); \ a = REAL(sa); \ b = REAL(sb); \ y = REAL(sy) SETUP_DPQ2; int i_1 = asInteger(sI); mod_iterate2(nx, na, nb, ix, ia, ib) { xi = x[ix]; ai = a[ia]; bi = b[ib]; if_NA_dpq2_set(y[i], xi, ai, bi) else { y[i] = f(xi, ai, bi, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQ2 \ if (naflag) \ warning(R_MSG_NA); \ \ if (n == nx) \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ else if (n == na) \ SHALLOW_DUPLICATE_ATTRIB(sy, sa); \ else if (n == nb) \ SHALLOW_DUPLICATE_ATTRIB(sy, sb); \ UNPROTECT(4) FINISH_DPQ2; return sy; } static SEXP dpq2_2(SEXP sx, SEXP sa, SEXP sb, SEXP sI, SEXP sJ, double (*f)(double, 
double, double, int, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, n, nx, na, nb; double xi, ai, bi, *x, *a, *b, *y; Rboolean naflag = FALSE; SETUP_DPQ2; int i_1 = asInteger(sI), i_2 = asInteger(sJ); mod_iterate2(nx, na, nb, ix, ia, ib) { xi = x[ix]; ai = a[ia]; bi = b[ib]; if_NA_dpq2_set(y[i], xi, ai, bi) else { y[i] = f(xi, ai, bi, i_1, i_2); if (ISNAN(y[i])) naflag = TRUE; } } FINISH_DPQ2; return sy; } /* This is needed for qinvgauss that has three additional parameters * for the tolerance, the maximum number of iterations and echoing of * the iterations. */ static SEXP dpq2_5(SEXP sx, SEXP sa, SEXP sb, SEXP sI, SEXP sJ, SEXP sT, SEXP sM, SEXP sE, double (*f)(double, double, double, int, int, double, int, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, n, nx, na, nb; double xi, ai, bi, *x, *a, *b, *y; Rboolean naflag = FALSE; SETUP_DPQ2; int i_1 = asInteger(sI), i_2 = asInteger(sJ), i_4 = asInteger(sM), i_5 = asInteger(sE); double d_3 = asReal(sT); mod_iterate2(nx, na, nb, ix, ia, ib) { xi = x[ix]; ai = a[ia]; bi = b[ib]; if_NA_dpq2_set(y[i], xi, ai, bi) else { y[i] = f(xi, ai, bi, i_1, i_2, d_3, i_4, i_5); if (ISNAN(y[i])) naflag = TRUE; } } FINISH_DPQ2; return sy; } #define DPQ2_1(A, FUN) dpq2_1(CAR(A), CADR(A), CADDR(A), CADDDR(A), FUN); #define DPQ2_2(A, FUN) dpq2_2(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), FUN) #define DPQ2_5(A, FUN) dpq2_5(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), CAD6R(A), CAD7R(A), FUN) SEXP actuar_do_dpq2(int code, SEXP args) { switch (code) { case 1: return DPQ2_1(args, mgamma); case 2: return DPQ2_1(args, dinvgamma); case 3: return DPQ2_2(args, pinvgamma); case 4: return DPQ2_2(args, qinvgamma); case 5: return DPQ2_1(args, minvgamma); case 6: return DPQ2_1(args, dinvparalogis); case 7: return DPQ2_2(args, pinvparalogis); case 8: return DPQ2_2(args, qinvparalogis); case 9: return DPQ2_1(args, minvparalogis); case 10: return DPQ2_1(args, dinvpareto); case 11: return DPQ2_2(args, pinvpareto); case 12: return DPQ2_2(args, 
qinvpareto); case 13: return DPQ2_1(args, minvpareto); case 14: return DPQ2_1(args, dinvweibull); case 15: return DPQ2_2(args, pinvweibull); case 16: return DPQ2_2(args, qinvweibull); case 17: return DPQ2_1(args, minvweibull); case 18: return DPQ2_1(args, dlgamma); case 19: return DPQ2_2(args, plgamma); case 20: return DPQ2_2(args, qlgamma); case 21: return DPQ2_1(args, mlgamma); case 22: return DPQ2_1(args, dllogis); case 23: return DPQ2_2(args, pllogis); case 24: return DPQ2_2(args, qllogis); case 25: return DPQ2_1(args, mllogis); case 26: return DPQ2_1(args, mlnorm); case 27: return DPQ2_1(args, dparalogis); case 28: return DPQ2_2(args, pparalogis); case 29: return DPQ2_2(args, qparalogis); case 30: return DPQ2_1(args, mparalogis); case 31: return DPQ2_1(args, dpareto); case 32: return DPQ2_2(args, ppareto); case 33: return DPQ2_2(args, qpareto); case 34: return DPQ2_1(args, mpareto); case 35: return DPQ2_1(args, dpareto1); case 36: return DPQ2_2(args, ppareto1); case 37: return DPQ2_2(args, qpareto1); case 38: return DPQ2_1(args, mpareto1); case 39: return DPQ2_1(args, mweibull); case 40: return DPQ2_1(args, levexp); case 41: return DPQ2_1(args, levinvexp); case 42: return DPQ2_1(args, mbeta); case 43: return DPQ2_1(args, mgfgamma); case 44: return DPQ2_1(args, mgfnorm); case 45: return DPQ2_1(args, mgfunif); case 46: return DPQ2_1(args, mgfinvgamma); case 47: return DPQ2_1(args, mnorm); case 48: return DPQ2_1(args, mchisq); case 49: return DPQ2_1(args, mgfchisq); /* case 50: return DPQ2_1(args, minvGauss); [defunct v3.0-0] */ /* case 51: return DPQ2_1(args, mgfinvGauss); [defunct v3.0-0] */ case 52: return DPQ2_1(args, munif); case 53: return DPQ2_1(args, dgumbel); case 54: return DPQ2_2(args, pgumbel); case 55: return DPQ2_2(args, qgumbel); case 56: return DPQ2_1(args, mgumbel); case 57: return DPQ2_1(args, mgfgumbel); case 58: return DPQ2_1(args, dinvgauss); case 59: return DPQ2_2(args, pinvgauss); case 60: return DPQ2_5(args, qinvgauss); case 61: return 
DPQ2_1(args, minvgauss); case 62: return DPQ2_1(args, mgfinvgauss); case 101: return DPQ2_1(args, dztnbinom); case 102: return DPQ2_2(args, pztnbinom); case 103: return DPQ2_2(args, qztnbinom); case 104: return DPQ2_1(args, dztbinom); case 105: return DPQ2_2(args, pztbinom); case 106: return DPQ2_2(args, qztbinom); case 107: return DPQ2_1(args, dzmlogarithmic); case 108: return DPQ2_2(args, pzmlogarithmic); case 109: return DPQ2_2(args, qzmlogarithmic); case 110: return DPQ2_1(args, dzmpois); case 111: return DPQ2_2(args, pzmpois); case 112: return DPQ2_2(args, qzmpois); case 113: return DPQ2_1(args, dzmgeom); case 114: return DPQ2_2(args, pzmgeom); case 115: return DPQ2_2(args, qzmgeom); case 116: return DPQ2_1(args, dpoisinvgauss); case 117: return DPQ2_2(args, ppoisinvgauss); case 118: return DPQ2_2(args, qpoisinvgauss); default: error(_("internal error in actuar_do_dpq2")); } return args; /* never used; to keep -Wall happy */ } /* Functions for three parameter distributions */ #define if_NA_dpq3_set(y, x, a, b, c) \ if (ISNA (x) || ISNA (a) || ISNA (b) || ISNA (c)) y = NA_REAL; \ else if (ISNAN(x) || ISNAN(a) || ISNAN(b) || ISNAN(c)) y = R_NaN; #define mod_iterate3(n1, n2, n3, n4, i1, i2, i3, i4) \ for (i = i1 = i2 = i3 = i4 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 0 : i2, \ i3 = (++i3 == n3) ? 0 : i3, \ i4 = (++i4 == n4) ? 
0 : i4, \ ++i) static SEXP dpq3_1(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sI, double (*f)(double, double, double, double, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, ic, n, nx, na, nb, nc; double xi, ai, bi, ci, *x, *a, *b, *c, *y; Rboolean naflag = FALSE; #define SETUP_DPQ3 \ if (!isNumeric(sx) || !isNumeric(sa) || \ !isNumeric(sb) || !isNumeric(sc)) \ error(_("invalid arguments")); \ \ nx = XLENGTH(sx); \ na = XLENGTH(sa); \ nb = XLENGTH(sb); \ nc = XLENGTH(sc); \ if ((nx == 0) || (na == 0) || (nb == 0) || (nc == 0)) \ return(allocVector(REALSXP, 0)); \ n = nx; \ if (n < na) n = na; \ if (n < nb) n = nb; \ if (n < nc) n = nc; \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sb = coerceVector(sb, REALSXP)); \ PROTECT(sc = coerceVector(sc, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = REAL(sx); \ a = REAL(sa); \ b = REAL(sb); \ c = REAL(sc); \ y = REAL(sy) SETUP_DPQ3; int i_1 = asInteger(sI); mod_iterate3(nx, na, nb, nc, ix, ia, ib, ic) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; if_NA_dpq3_set(y[i], xi, ai, bi, ci) else { y[i] = f(xi, ai, bi, ci, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQ3 \ if (naflag) \ warning(R_MSG_NA); \ \ if (n == nx) \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ else if (n == na) \ SHALLOW_DUPLICATE_ATTRIB(sy, sa); \ else if (n == nb) \ SHALLOW_DUPLICATE_ATTRIB(sy, sb); \ else if (n == nc) \ SHALLOW_DUPLICATE_ATTRIB(sy, sc); \ UNPROTECT(5) FINISH_DPQ3; return sy; } static SEXP dpq3_2(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sI, SEXP sJ, double (*f)(double, double, double, double, int, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, ic, n, nx, na, nb, nc; double xi, ai, bi, ci, *x, *a, *b, *c, *y; Rboolean naflag = FALSE; SETUP_DPQ3; int i_1 = asInteger(sI), i_2 = asInteger(sJ); mod_iterate3(nx, na, nb, nc, ix, ia, ib, ic) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; if_NA_dpq3_set(y[i], xi, ai, bi, ci) else { y[i] = f(xi, ai, bi, ci, i_1, i_2); if (ISNAN(y[i])) naflag = 
TRUE; } } FINISH_DPQ3; return sy; } #define DPQ3_1(A, FUN) dpq3_1(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), FUN); #define DPQ3_2(A, FUN) dpq3_2(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), FUN) SEXP actuar_do_dpq3(int code, SEXP args) { switch (code) { case 1: return DPQ3_1(args, dburr); case 2: return DPQ3_2(args, pburr); case 3: return DPQ3_2(args, qburr); case 4: return DPQ3_1(args, mburr); case 5: return DPQ3_1(args, dgenpareto); case 6: return DPQ3_2(args, pgenpareto); case 7: return DPQ3_2(args, qgenpareto); case 8: return DPQ3_1(args, mgenpareto); case 9: return DPQ3_1(args, dinvburr); case 10: return DPQ3_2(args, pinvburr); case 11: return DPQ3_2(args, qinvburr); case 12: return DPQ3_1(args, minvburr); case 13: return DPQ3_1(args, dinvtrgamma); case 14: return DPQ3_2(args, pinvtrgamma); case 15: return DPQ3_2(args, qinvtrgamma); case 16: return DPQ3_1(args, minvtrgamma); case 17: return DPQ3_1(args, dtrgamma); case 18: return DPQ3_2(args, ptrgamma); case 19: return DPQ3_2(args, qtrgamma); case 20: return DPQ3_1(args, mtrgamma); case 21: return DPQ3_1(args, levgamma); case 22: return DPQ3_1(args, levinvgamma); case 23: return DPQ3_1(args, levinvparalogis); case 24: return DPQ3_1(args, levinvpareto); case 25: return DPQ3_1(args, levinvweibull); case 26: return DPQ3_1(args, levlgamma); case 27: return DPQ3_1(args, levllogis); case 28: return DPQ3_1(args, levlnorm); case 29: return DPQ3_1(args, levparalogis); case 30: return DPQ3_1(args, levpareto); case 31: return DPQ3_1(args, levpareto1); case 32: return DPQ3_1(args, levweibull); case 33: return DPQ3_1(args, levbeta); case 34: return DPQ3_1(args, levchisq); /* case 35: return DPQ3_1(args, levinvGauss); [defunct v3.0-0] */ case 36: return DPQ3_1(args, levunif); case 37: return DPQ3_1(args, levinvgauss); case 38: return DPQ3_1(args, dpareto2); case 39: return DPQ3_2(args, ppareto2); case 40: return DPQ3_2(args, qpareto2); case 41: return DPQ3_1(args, mpareto2); case 42: return DPQ3_1(args, 
dpareto3); case 43: return DPQ3_2(args, ppareto3); case 44: return DPQ3_2(args, qpareto3); case 45: return DPQ3_1(args, mpareto3); case 101: return DPQ3_1(args, dzmnbinom); case 102: return DPQ3_2(args, pzmnbinom); case 103: return DPQ3_2(args, qzmnbinom); case 104: return DPQ3_1(args, dzmbinom); case 105: return DPQ3_2(args, pzmbinom); case 106: return DPQ3_2(args, qzmbinom); default: error(_("internal error in actuar_do_dpq3")); } return args; /* never used; to keep -Wall happy */ } /* Functions for four parameter distributions */ #define if_NA_dpq4_set(y, x, a, b, c, d) \ if (ISNA (x) || ISNA (a) || ISNA (b) || ISNA (c) || ISNA (d)) \ y = NA_REAL; \ else if (ISNAN(x) || ISNAN(a) || ISNAN(b) || ISNAN(c) || ISNAN(d)) \ y = R_NaN; #define mod_iterate4(n1, n2, n3, n4, n5, i1, i2, i3, i4, i5) \ for (i = i1 = i2 = i3 = i4 = i5 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 0 : i2, \ i3 = (++i3 == n3) ? 0 : i3, \ i4 = (++i4 == n4) ? 0 : i4, \ i5 = (++i5 == n5) ? 0 : i5, \ ++i) static SEXP dpq4_1(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sd, SEXP sI, double (*f)(double, double, double, double, double, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, ic, id, n, nx, na, nb, nc, nd; double xi, ai, bi, ci, di, *x, *a, *b, *c, *d, *y; Rboolean naflag = FALSE; #define SETUP_DPQ4 \ if (!isNumeric(sx) || !isNumeric(sa) || !isNumeric(sb) || \ !isNumeric(sc) || !isNumeric(sd)) \ error(_("invalid arguments")); \ \ nx = XLENGTH(sx); \ na = XLENGTH(sa); \ nb = XLENGTH(sb); \ nc = XLENGTH(sc); \ nd = XLENGTH(sd); \ if ((nx == 0) || (na == 0) || (nb == 0) || \ (nc == 0) || (nd == 0)) \ return(allocVector(REALSXP, 0)); \ n = nx; \ if (n < na) n = na; \ if (n < nb) n = nb; \ if (n < nc) n = nc; \ if (n < nd) n = nd; \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sb = coerceVector(sb, REALSXP)); \ PROTECT(sc = coerceVector(sc, REALSXP)); \ PROTECT(sd = coerceVector(sd, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = 
REAL(sx); \ a = REAL(sa); \ b = REAL(sb); \ c = REAL(sc); \ d = REAL(sd); \ y = REAL(sy) SETUP_DPQ4; int i_1 = asInteger(sI); mod_iterate4(nx, na, nb, nc, nd, ix, ia, ib, ic, id) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; di = d[id]; if_NA_dpq4_set(y[i], xi, ai, bi, ci, di) else { y[i] = f(xi, ai, bi, ci, di, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQ4 \ if (naflag) \ warning(R_MSG_NA); \ \ if (n == nx) \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ else if (n == na) \ SHALLOW_DUPLICATE_ATTRIB(sy, sa); \ else if (n == nb) \ SHALLOW_DUPLICATE_ATTRIB(sy, sb); \ else if (n == nc) \ SHALLOW_DUPLICATE_ATTRIB(sy, sc); \ else if (n == nd) \ SHALLOW_DUPLICATE_ATTRIB(sy, sd); \ UNPROTECT(6) FINISH_DPQ4; return sy; } static SEXP dpq4_2(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sd, SEXP sI, SEXP sJ, double (*f)(double, double, double, double, double, int, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, ic, id, n, nx, na, nb, nc, nd; double xi, ai, bi, ci, di, *x, *a, *b, *c, *d, *y; Rboolean naflag = FALSE; SETUP_DPQ4; int i_1 = asInteger(sI), i_2 = asInteger(sJ); mod_iterate4(nx, na, nb, nc, nd, ix, ia, ib, ic, id) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; di = d[id]; if_NA_dpq4_set(y[i], xi, ai, bi, ci, di) else { y[i] = f(xi, ai, bi, ci, di, i_1, i_2); if (ISNAN(y[i])) naflag = TRUE; } } FINISH_DPQ4; return sy; } #define DPQ4_1(A, FUN) dpq4_1(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), FUN); #define DPQ4_2(A, FUN) dpq4_2(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), CAD6R(A), FUN) SEXP actuar_do_dpq4(int code, SEXP args) { switch (code) { case 1: return DPQ4_1(args, dtrbeta); case 2: return DPQ4_2(args, ptrbeta); case 3: return DPQ4_2(args, qtrbeta); case 4: return DPQ4_1(args, mtrbeta); case 5: return DPQ4_1(args, levburr); case 6: return DPQ4_1(args, levgenpareto); case 7: return DPQ4_1(args, levinvburr); case 8: return DPQ4_1(args, levinvtrgamma); case 9: return DPQ4_1(args, levtrgamma); case 10: return DPQ4_1(args, dgenbeta); 
case 11: return DPQ4_2(args, pgenbeta); case 12: return DPQ4_2(args, qgenbeta); case 13: return DPQ4_1(args, mgenbeta); case 14: return DPQ4_1(args, levpareto2); case 15: return DPQ4_1(args, levpareto3); case 16: return DPQ4_1(args, dpareto4); case 17: return DPQ4_2(args, ppareto4); case 18: return DPQ4_2(args, qpareto4); case 19: return DPQ4_1(args, mpareto4); default: error(_("internal error in actuar_do_dpq4")); } return args; /* never used; to keep -Wall happy */ } /* Functions for five parameter distributions */ #define if_NA_dpq5_set(y, x, a, b, c, d, e) \ if (ISNA (x) || ISNA (a) || ISNA (b) || ISNA (c) || ISNA (d) || ISNA (e)) \ y = NA_REAL; \ else if (ISNAN(x) || ISNAN(a) || ISNAN(b) || ISNAN(c) || ISNAN(d) || ISNAN (e)) \ y = R_NaN; #define mod_iterate5(n1, n2, n3, n4, n5, n6, i1, i2, i3, i4, i5, i6) \ for (i = i1 = i2 = i3 = i4 = i5 = i6 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 0 : i2, \ i3 = (++i3 == n3) ? 0 : i3, \ i4 = (++i4 == n4) ? 0 : i4, \ i5 = (++i5 == n5) ? 0 : i5, \ i6 = (++i6 == n6) ? 
0 : i6, \ ++i) static SEXP dpq5_1(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sd, SEXP se, SEXP sI, double (*f)(double, double, double, double, double, double, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, ic, id, ie, n, nx, na, nb, nc, nd, ne; double xi, ai, bi, ci, di, ei, *x, *a, *b, *c, *d, *e, *y; Rboolean naflag = FALSE; #define SETUP_DPQ5 \ if (!isNumeric(sx) || !isNumeric(sa) || !isNumeric(sb) || \ !isNumeric(sc) || !isNumeric(sd) || !isNumeric(se)) \ error(_("invalid arguments")); \ \ nx = XLENGTH(sx); \ na = XLENGTH(sa); \ nb = XLENGTH(sb); \ nc = XLENGTH(sc); \ nd = XLENGTH(sd); \ ne = XLENGTH(se); \ if ((nx == 0) || (na == 0) || (nb == 0) || \ (nc == 0) || (nd == 0) || (ne == 0)) \ return(allocVector(REALSXP, 0)); \ n = nx; \ if (n < na) n = na; \ if (n < nb) n = nb; \ if (n < nc) n = nc; \ if (n < nd) n = nd; \ if (n < ne) n = ne; \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sb = coerceVector(sb, REALSXP)); \ PROTECT(sc = coerceVector(sc, REALSXP)); \ PROTECT(sd = coerceVector(sd, REALSXP)); \ PROTECT(se = coerceVector(se, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = REAL(sx); \ a = REAL(sa); \ b = REAL(sb); \ c = REAL(sc); \ d = REAL(sd); \ e = REAL(se); \ y = REAL(sy) SETUP_DPQ5; int i_1 = asInteger(sI); mod_iterate5(nx, na, nb, nc, nd, ne, ix, ia, ib, ic, id, ie) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; di = d[id]; ei = e[ie]; if_NA_dpq5_set(y[i], xi, ai, bi, ci, di, ei) else { y[i] = f(xi, ai, bi, ci, di, ei, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQ5 \ if (naflag) \ warning(R_MSG_NA); \ \ if (n == nx) \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ else if (n == na) \ SHALLOW_DUPLICATE_ATTRIB(sy, sa); \ else if (n == nb) \ SHALLOW_DUPLICATE_ATTRIB(sy, sb); \ else if (n == nc) \ SHALLOW_DUPLICATE_ATTRIB(sy, sc); \ else if (n == nd) \ SHALLOW_DUPLICATE_ATTRIB(sy, sd); \ else if (n == ne) \ SHALLOW_DUPLICATE_ATTRIB(sy, se); \ UNPROTECT(7) FINISH_DPQ5; return sy; } static SEXP 
dpq5_2(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sd, SEXP se, SEXP sI, SEXP sJ, double (*f)(double, double, double, double, double, double, int, int)) { SEXP sy; R_xlen_t i, ix, ia, ib, ic, id, ie, n, nx, na, nb, nc, nd, ne; double xi, ai, bi, ci, di, ei, *x, *a, *b, *c, *d, *e, *y; Rboolean naflag = FALSE; SETUP_DPQ5; int i_1 = asInteger(sI), i_2 = asInteger(sJ); mod_iterate5(nx, na, nb, nc, nd, ne, ix, ia, ib, ic, id, ie) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; di = d[id]; ei = e[ie]; if_NA_dpq5_set(y[i], xi, ai, bi, ci, di, ei) else { y[i] = f(xi, ai, bi, ci, di, ei, i_1, i_2); if (ISNAN(y[i])) naflag = TRUE; } } FINISH_DPQ5; return sy; } #define DPQ5_1(A, FUN) dpq5_1(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), CAD6R(A), FUN); #define DPQ5_2(A, FUN) dpq5_2(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), CAD6R(A), CAD7R(A), FUN) SEXP actuar_do_dpq5(int code, SEXP args) { switch (code) { case 1: return DPQ5_1(args, levtrbeta); case 2: return DPQ5_1(args, levgenbeta); case 3: return DPQ5_1(args, dfpareto); case 4: return DPQ5_2(args, pfpareto); case 5: return DPQ5_2(args, qfpareto); case 6: return DPQ5_1(args, mfpareto); case 7: return DPQ5_1(args, levpareto4); default: error(_("internal error in actuar_do_dpq5")); } return args; /* never used; to keep -Wall happy */ } /* Functions for six parameter distributions */ #define if_NA_dpq6_set(y, x, a, b, c, d, e, g) \ if (ISNA (x) || ISNA (a) || ISNA (b) || ISNA (c) || ISNA (d) || ISNA (e) || ISNA (g)) \ y = NA_REAL; \ else if (ISNAN(x) || ISNAN(a) || ISNAN(b) || ISNAN(c) || ISNAN(d) || ISNAN(e) || ISNAN(g)) \ y = R_NaN; #define mod_iterate6(n1, n2, n3, n4, n5, n6, n7, i1, i2, i3, i4, i5, i6, i7) \ for (i = i1 = i2 = i3 = i4 = i5 = i6 = i7 = 0; i < n; \ i1 = (++i1 == n1) ? 0 : i1, \ i2 = (++i2 == n2) ? 0 : i2, \ i3 = (++i3 == n3) ? 0 : i3, \ i4 = (++i4 == n4) ? 0 : i4, \ i5 = (++i5 == n5) ? 0 : i5, \ i6 = (++i6 == n6) ? 0 : i6, \ i7 = (++i7 == n7) ? 
0 : i7, \ ++i) static SEXP dpq6_1(SEXP sx, SEXP sa, SEXP sb, SEXP sc, SEXP sd, SEXP se, SEXP sg, SEXP sI, double (*f)(double, double, double, double, double, double, double, int)) { SEXP sy; /* skip argument "sf" because "if" is a C keyword. */ R_xlen_t i, ix, ia, ib, ic, id, ie, ig, n, nx, na, nb, nc, nd, ne, ng; double xi, ai, bi, ci, di, ei, gi, *x, *a, *b, *c, *d, *e, *g, *y; Rboolean naflag = FALSE; #define SETUP_DPQ6 \ if (!isNumeric(sx) || !isNumeric(sa) || !isNumeric(sb) || \ !isNumeric(sc) || !isNumeric(sd) || !isNumeric(se) || \ !isNumeric(sg)) \ error(_("invalid arguments")); \ \ nx = XLENGTH(sx); \ na = XLENGTH(sa); \ nb = XLENGTH(sb); \ nc = XLENGTH(sc); \ nd = XLENGTH(sd); \ ne = XLENGTH(se); \ ng = XLENGTH(sg); \ if ((nx == 0) || (na == 0) || (nb == 0) || \ (nc == 0) || (nd == 0) || (ne == 0) || \ (ng == 0)) \ return(allocVector(REALSXP, 0)); \ n = nx; \ if (n < na) n = na; \ if (n < nb) n = nb; \ if (n < nc) n = nc; \ if (n < nd) n = nd; \ if (n < ne) n = ne; \ if (n < ng) n = ng; \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sb = coerceVector(sb, REALSXP)); \ PROTECT(sc = coerceVector(sc, REALSXP)); \ PROTECT(sd = coerceVector(sd, REALSXP)); \ PROTECT(se = coerceVector(se, REALSXP)); \ PROTECT(sg = coerceVector(sg, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = REAL(sx); \ a = REAL(sa); \ b = REAL(sb); \ c = REAL(sc); \ d = REAL(sd); \ e = REAL(se); \ g = REAL(sg); \ y = REAL(sy) SETUP_DPQ6; int i_1 = asInteger(sI); mod_iterate6(nx, na, nb, nc, nd, ne, ng, ix, ia, ib, ic, id, ie, ig) { xi = x[ix]; ai = a[ia]; bi = b[ib]; ci = c[ic]; di = d[id]; ei = e[ie]; gi = g[ig]; if_NA_dpq6_set(y[i], xi, ai, bi, ci, di, ei, gi) else { y[i] = f(xi, ai, bi, ci, di, ei, gi, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQ6 \ if (naflag) \ warning(R_MSG_NA); \ \ if (n == nx) \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ else if (n == na) \ SHALLOW_DUPLICATE_ATTRIB(sy, sa); \ else if (n == nb) \ 
SHALLOW_DUPLICATE_ATTRIB(sy, sb); \ else if (n == nc) \ SHALLOW_DUPLICATE_ATTRIB(sy, sc); \ else if (n == nd) \ SHALLOW_DUPLICATE_ATTRIB(sy, sd); \ else if (n == ne) \ SHALLOW_DUPLICATE_ATTRIB(sy, se); \ else if (n == ng) \ SHALLOW_DUPLICATE_ATTRIB(sy, sg); \ UNPROTECT(8) FINISH_DPQ6; return sy; } #define DPQ6_1(A, FUN) dpq6_1(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), CAD5R(A), CAD6R(A), CAD7R(A), FUN); SEXP actuar_do_dpq6(int code, SEXP args) { switch (code) { case 1: return DPQ6_1(args, levfpareto); default: error(_("internal error in actuar_do_dpq6")); } return args; /* never used; to keep -Wall happy */ } /* Main function, the only one used by .External(). */ SEXP actuar_do_dpq(SEXP args) { int i; const char *name; /* Extract distribution name */ args = CDR(args); name = CHAR(STRING_ELT(CAR(args), 0)); /* Dispatch to actuar_do_dpq{1,2,3,4,5,6} */ for (i = 0; dpq_tab[i].name; i++) { if (!strcmp(dpq_tab[i].name, name)) { return dpq_tab[i].cfun(dpq_tab[i].code, CDR(args)); } } /* No dispatch is an error */ error("internal error in actuar_do_dpq"); return args; /* never used; to keep -Wall happy */ } actuar/src/burr.c0000644000176200001440000001053715147745722013462 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Burr distribution. See ../R/Burr.R for details. * * We work with the density expressed as * * shape1 * shape2 * u^shape1 * (1 - u) / x * * with u = 1/(1 + v), v = (x/scale)^shape2. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dburr(double x, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return x + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape2 < 1) return R_PosInf; if (shape2 > 1) return ACT_D__0; /* else */ return ACT_D_val(shape1 / scale); } double logv, logu, log1mu; logv = shape2 * (log(x) - log(scale)); logu = - log1pexp(logv); log1mu = - log1pexp(-logv); return ACT_D_exp(log(shape1) + log(shape2) + shape1 * logu + log1mu - log(x)); } double pburr(double q, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return q + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(shape2 * (log(q) - log(scale)))); return ACT_DT_Cval(R_pow(u, shape1)); } double qburr(double p, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return p + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(R_pow(ACT_D_Cval(p), -1.0/shape1) - 1.0, 1.0/shape2); } double rburr(double shape1, double shape2, double scale) { if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; return scale * 
R_pow(R_pow(unif_rand(), -1.0/shape1) - 1.0, 1.0/shape2); } double mburr(double order, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return order + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape2 || order >= shape1 * shape2) return R_PosInf; double tmp = order / shape2; return R_pow(scale, order) * gammafn(1.0 + tmp) * gammafn(shape1 - tmp) / gammafn(shape1); } double levburr(double limit, double shape1, double shape2, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape2) return R_PosInf; if (limit <= 0.0) return 0.0; double logv, u, u1m; double tmp = order / shape2; logv = shape2 * (log(limit) - log(scale)); u = exp(-log1pexp(logv)); u1m = exp(-log1pexp(-logv)); return R_pow(scale, order) * betaint_raw(u1m, 1.0 + tmp, shape1 - tmp, u) / gammafn(shape1) + ACT_DLIM__0(limit, order) * R_pow(u, shape1); } actuar/src/pareto3.c0000644000176200001440000001243015147745722014057 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Pareto (type) III distribution. See ../R/Pareto3.R for * details. * * We work with the density expressed as * * shape * u * (1 - u) / (x - min) * * with u = v/(1 + v), v = ((x - min)/scale)^shape. 
* * AUTHOR: Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dpareto3(double x, double min, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return x + min + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < min) return ACT_D__0; /* handle x == min separately */ if (x == min) { if (shape < 1) return R_PosInf; if (shape > 1) return ACT_D__0; /* else */ return ACT_D_val(1.0/scale); } double logv, logu, log1mu; logv = shape * (log(x - min) - log(scale)); logu = - log1pexp(-logv); log1mu = - log1pexp(logv); return ACT_D_exp(log(shape) + logu + log1mu - log(x - min)); } double ppareto3(double q, double min, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return q + min + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (q <= min) return ACT_DT_0; double u = exp(-log1pexp(shape * (log(scale) - log(q - min)))); return ACT_DT_val(u); } double qpareto3(double p, double min, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return p + min + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return min + scale * R_pow(1.0/ACT_D_Cval(p) - 1.0, 1.0/shape); } double rpareto3(double min, double shape, double scale) { if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; return min + scale * R_pow(1.0/unif_rand() - 1.0, 1.0/shape); } double mpareto3(double order, double min, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(min) || 
ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; /* The case min = 0 is a loglogistic with a larger range of * admissible values for order: -shape < order < shape. */ if (min == 0.0) return mllogis(order, shape, scale, give_log); /* From now on min != 0 and order must be a stricly non negative * integer < shape. */ if (order < 0.0) return R_NaN; if (order >= shape) return R_PosInf; int i; double order0 = order; double tmp, sum, r = scale/min; if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = 1.0; /* first term in the sum */ for (i = 1; i <= order; i++) { tmp = i / shape; sum += choose(order, i) * R_pow(r, i) * gammafn(1.0 + tmp) * gammafn(1.0 - tmp); } return R_pow(min, order) * sum; } double levpareto3(double limit, double min, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(min) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + min + shape + scale + order; #endif if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (limit <= min) return 0.0; /* The case min = 0 is a loglogistic with a larger range of * admissible values for order: order > -shape. */ if (min == 0.0) return levllogis(limit, shape, scale, order, give_log); /* From now on min != 0 and order must be a stricly non negative * integer. 
*/ if (order < 0.0) return R_NaN; int i; double order0 = order; double logv, u, u1m; double tmp, sum, r = scale / min; logv = shape * (log(limit - min) - log(scale)); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = betaint_raw(u, 1.0, 1.0, u1m); /* first term in the sum */ for (i = 1; i <= order; i++) { tmp = i / shape; sum += choose(order, i) * R_pow(r, i) * betaint_raw(u, 1.0 + tmp, 1.0 - tmp, u1m); } return R_pow(min, order) * sum + ACT_DLIM__0(limit, order) * (0.5 - u + 0.5); } actuar/src/gamma.c0000644000176200001440000000362515147745722013572 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to calculate raw and limited moments for the Gamma * distribution. See ../R/GammaSupp.R for details. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mgamma(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape) return R_PosInf; return R_pow(scale, order) * gammafn(order + shape) / gammafn(shape); } double levgamma(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape) return R_PosInf; if (limit <= 0.0) return 0.0; double u, tmp; tmp = order + shape; u = exp(log(limit) - log(scale)); return R_pow(scale, order) * gammafn(tmp) * pgamma(u, tmp, 1.0, 1, 0) / gammafn(shape) + ACT_DLIM__0(limit, order) * pgamma(u, shape, 1.0, 0, 
0); } double mgfgamma(double t, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(shape) || ISNAN(scale)) return t + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0 || scale * t > 1.) return R_NaN; if (t == 0.0) return ACT_D__1; return ACT_D_exp(-shape * log1p(-scale * t)); } actuar/src/pareto.c0000644000176200001440000000672215147745722014003 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Pareto distribution. See ../R/Pareto.R for details. * * We work with the density expressed as * * shape * u^shape * (1 - u) / x * * with u = 1/(1 + v), v = x/scale. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dpareto(double x, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape) || ISNAN(scale)) return x + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) return ACT_D_val(shape / scale); double logv, logu, log1mu; logv = log(x) - log(scale); logu = - log1pexp(logv); log1mu = - log1pexp(-logv); return ACT_D_exp(log(shape) + shape * logu + log1mu - log(x)); } double ppareto(double q, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape) || ISNAN(scale)) return q + shape + scale; #endif if (!R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(-log1pexp(log(q) - log(scale))); return ACT_DT_Cval(R_pow(u, shape)); } double qpareto(double p, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape) || ISNAN(scale)) return p + shape + 
scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * (R_pow(ACT_D_Cval(p), -1.0/shape) - 1.0); } double rpareto(double shape, double scale) { if (!R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; return scale * (R_pow(unif_rand(), -1.0/shape) - 1.0); } double mpareto(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -1.0 || order >= shape) return R_PosInf; return R_pow(scale, order) * gammafn(1.0 + order) * gammafn(shape - order) / gammafn(shape); } double levpareto(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -1.0) return R_PosInf; if (limit <= 0.0) return 0.0; double logv, u, u1m; logv = log(limit) - log(scale); u = exp(-log1pexp(logv)); u1m = exp(-log1pexp(-logv)); return R_pow(scale, order) * betaint_raw(u1m, 1.0 + order, shape - order, u) / gammafn(shape) + ACT_DLIM__0(limit, order) * R_pow(u, shape); } actuar/src/weibull.c0000644000176200001440000000273115147745722014150 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Fonctions to calculate raw and limited moments for the Weibull * distribution. See ../R/WeibullMoments.R for details. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mweibull(double order, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if (!R_FINITE(scale) || !R_FINITE(shape) || !R_FINITE(order) || scale <= 0.0 || shape <= 0.0) return R_NaN; if (order <= -shape) return R_PosInf; return R_pow(scale, order) * gammafn(1.0 + order / shape); } double levweibull(double limit, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + shape + scale + order; #endif if (!R_FINITE(scale) || !R_FINITE(shape) || !R_FINITE(order) || scale <= 0.0 || shape <= 0.0) return R_NaN; if (order <= -shape) return R_PosInf; if (limit <= 0.0) return 0.0; double u, tmp; tmp = 1.0 + order / shape; u = exp(shape * (log(limit) - log(scale))); return R_pow(scale, order) * gammafn(tmp) * pgamma(u, tmp, 1.0, 1, 0) + ACT_DLIM__0(limit, order) * exp(-u); } actuar/src/invexp.c0000644000176200001440000000501015147745722014007 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the inverse exponential distribution. See ../R/InverseExponential.R * for details. * * We work with the density expressed as * * u * e^(-u) / x * * with u = scale/x. 
* * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvexp(double x, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(scale)) return x + scale; #endif if (!R_FINITE(scale) || scale < 0.0) return R_NaN; /* handle also x == 0 here */ if (!R_FINITE(x) || x <= 0.0) return ACT_D__0; double logu = log(scale) - log(x); return ACT_D_exp(logu - exp(logu) - log(x)); } double pinvexp(double q, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(scale)) return q + scale; #endif if (!R_FINITE(scale) || scale < 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; double u = exp(log(scale) - log(q)); return ACT_DT_Eval(-u); } double qinvexp(double p, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(scale)) return p + scale; #endif if (!R_FINITE(scale) || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return -scale / log(ACT_D_Lval(p)); } double rinvexp(double scale) { if (!R_FINITE(scale) || scale <= 0.0) return R_NaN; return scale / rexp(1.0); } double minvexp(double order, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(scale)) return order + scale; #endif if (!R_FINITE(scale) || !R_FINITE(order) || scale <= 0.0) return R_NaN; if (order >= 1.0) return R_PosInf; return R_pow(scale, order) * gammafn(1.0 - order); } double levinvexp(double limit, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(scale) || ISNAN(order)) return limit + scale + order; #endif if (!R_FINITE(scale) || !R_FINITE(order) || scale <= 0.0) return R_NaN; if (limit <= 0.0) return 0.0; double u = exp(log(scale) - log(limit)); return R_pow(scale, order) * actuar_gamma_inc(1.0 - order, u) + ACT_DLIM__0(limit, order) * (0.5 - exp(-u) + 0.5); } actuar/src/phtype.c0000644000176200001440000001305515147745722014017 0ustar liggesusers/* actuar: Actuarial Functions and 
Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and moment * generating functions, raw moments and to simulate random variates * for Phase-type distributions. See ../R/PhaseType.R for details. * * The density function is * * pi * exp(x * T) * t * (1 x m) (m x m) (m x 1) * * for x > 0, with t = -T * e and e a 1-vector, and 1 - pi * e * for x = 0. * * AUTHOR: Vincent Goulet */ #include #include #include #include #include "actuar.h" #include "locale.h" #include "dpq.h" double dphtype(double x, double *pi, double *T, int m, int give_log) { if (!R_FINITE(x) || x < 0.0) return ACT_D__0; if (x == 0.0) { int i; double z = 0.0; for (i = 0; i < m; i++) z += pi[i]; return ACT_D_Clog(z); } int i, j, ij; double *t, *tmp; /* Build vector t (equal to minus the row sums of matrix T) and * matrix tmp = x * T. */ t = (double *) S_alloc(m, sizeof(double)); /* initialized to 0 */ tmp = (double *) R_alloc(m * m, sizeof(double)); for (i = 0; i < m; i++) for (j = 0; j < m; j++) { ij = i + j * m; t[i] -= T[ij]; tmp[ij] = x * T[ij]; } return ACT_D_val(actuar_expmprod(pi, tmp, t, m)); } double pphtype(double q, double *pi, double *T, int m, int lower_tail, int log_p) { /* Cumulative distribution function is * * 1 - pi * exp(q * T) * e * (1 x m) (m x m) (m x 1) * * for x > 0, where e a 1-vector, and 1 - pi * e for x = 0. */ if (q < 0.0) return ACT_DT_0; if (q == 0.0) { int i; double z = 0.0; for (i = 0; i < m; i++) z += pi[i]; return ACT_DT_Cval(z); } int i; double *e, *tmp; /* Create the 1-vector and multiply each element of T by q. */ e = (double *) R_alloc(m, sizeof(double)); for (i = 0; i < m; i++) e[i] = 1; tmp = (double *) R_alloc(m * m, sizeof(double)); for (i = 0; i < m * m; i++) tmp[i] = q * T[i]; return ACT_DT_Cval(actuar_expmprod(pi, tmp, e, m)); } double rphtype(double *pi, double **Q, double *rates, int m) { /* Algorithm based on Neuts, M. F. 
(1981), "Generating random * variates from a distribution of phase type", WSC '81: * Proceedings of the 13th conference on Winter simulation, IEEE * Press, */ int i, j, state, *nvisits; double z = 0.0; nvisits = (int *) S_alloc(m, sizeof(int)); /* Simulate initial state according to vector pi (transient states * are numbered 0, ..., m - 1 and absorbing state is numbered * m). See the definition of SampleSingleValue() to see why this * works fine here and below. */ state = SampleSingleValue(m, pi); /* Simulate the underlying Markov chain using transition matrix Q * while counting the number of visits in each transient state. */ while (state != m) { nvisits[state]++; state = SampleSingleValue(m, Q[state]); } /* Variate is the sum of as many exponential variates as there are * visits in each state, with the rate parameter varying per * state. */ for (i = 0; i < m; i++) for (j = 0; j < nvisits[i]; j++) z += exp_rand() / rates[i]; return z; } double mphtype(double order, double *pi, double *T, int m, int give_log) { /* Raw moment is * * order! * pi * (-T)^(-order) * e * (1 x 1) (1 x m) (m x m) (m x 1) * * where e is a 1-vector. Below, the moment is computed as * (-1)^order * order! * sum(pi * T^(-order)) */ if (order < 0.0 || ACT_nonint(order)) return R_NaN; int i, j; double tmp = 0.0, *Tpow; /* Compute the power of T */ Tpow = (double *) R_alloc(m * m, sizeof(double)); actuar_matpow(T, m, (int) -order, Tpow); /* Compute vector tmp = sum(pi * Tpow) */ for (i = 0; i < m; i++) for (j = 0; j < m; j++) tmp += pi[j] * Tpow[i * m + j]; /* Multiply by -1 if order is odd */ return ACT_D_val((int) order % 2 ? -gammafn(order + 1.0) * tmp : gammafn(order + 1.0) * tmp); } double mgfphtype(double x, double *pi, double *T, int m, int give_log) { /* Moment generating function is * * pi * (-x * I - T)^(-1) * t + (1 - pi * e) * (1 x m) (m x m) (m x 1) (1 x m) (m x 1) * * with t = -T * e, e a 1-vector and I the identity matrix. 
* Below, the mgf is computed as 1 - pi * (e + (x * I + T)^(-1) * t. */ if (x == 0.0) return ACT_D_exp(0.0); int i, j, ij; double z = 0.0, *t, *tmp1, *tmp2; /* Build vector t (equal to minux the row sums of matrix T) and * matrix tmp1 = x * I + T. */ t = (double *) S_alloc(m, sizeof(double)); /* initialized to 0 */ tmp1 = (double *) R_alloc(m * m, sizeof(double)); for (i = 0; i < m; i++) for (j = 0; j < m; j++) { ij = i + j * m; t[i] -= T[ij]; tmp1[ij] = (i == j) ? x + T[ij] : T[ij]; } /* Compute tmp2 = tmp1^(-1) * t */ tmp2 = (double *) R_alloc(m, sizeof(double)); actuar_solve(tmp1, t, m, 1, tmp2); /* Compute z = pi * (e + tmp2) */ for (i = 0; i < m; i++) z += pi[i] * (1 + tmp2[i]); return ACT_D_Clog(z); } actuar/src/chisq.c0000644000176200001440000000533615147745722013620 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to calculate raw and limited moments for the Chi-square * distribution. See ../R/ChisqSupp.R for details. * * AUTHORS: Christophe Dutang and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mchisq(double order, double df, double ncp, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(df) || ISNAN(ncp)) return order + df + ncp; #endif if (!R_FINITE(df) || !R_FINITE(ncp) || !R_FINITE(order) || df <= 0.0 || ncp < 0.0) return R_NaN; if (order <= -df/2.0) return R_PosInf; /* Trivial case */ if (order == 0.0) return 1.0; /* Centered chi-square distribution */ if (ncp == 0.0) return R_pow(2.0, order) * gammafn(order + df/2.0) / gammafn(df/2.0); /* Non centered chi-square distribution */ if (order >= 1.0 && (int) order == order) { int i, j = 0, n = order; double *res; /* Array with 1, E[X], E[X^2], ..., E[X^n] */ res = (double *) R_alloc(n + 1, sizeof(double)); res[0] = 1.0; res[1] = df + ncp; /* E[X] */ for (i = 2; i <= n; i++) { res[i] = R_pow_di(2.0, i - 1) * (df + i * ncp); for (j = 1; j < i; j++) res[i] += R_pow_di(2.0, j - 1) * (df + j * ncp) * res[i - j] / gammafn(i - 
j + 1); res[i] *= gammafn(i); } return res[n]; } else return R_NaN; } double levchisq(double limit, double df, double ncp, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(df) || ISNAN(ncp) || ISNAN(order)) return limit + df + ncp + order; #endif if (!R_FINITE(df) || !R_FINITE(ncp) || !R_FINITE(order) || df <= 0.0 || ncp < 0.0) return R_NaN; if (order <= -df/2.0) return R_PosInf; if (limit <= 0.0) return 0.0; if (ncp == 0.0) { double u, tmp; tmp = order + df/2.0; u = exp(log(limit) - M_LN2); return R_pow(2.0, order) * gammafn(tmp) * pgamma(u, tmp, 1.0, 1, 0) / gammafn(df/2.0) + ACT_DLIM__0(limit, order) * pgamma(u, df/2.0, 1.0, 0, 0); } else return R_NaN; } double mgfchisq(double t, double df, double ncp, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(df) || ISNAN(ncp)) return t + df + ncp; #endif if (!R_FINITE(df) || !R_FINITE(ncp) || df <= 0.0 || ncp < 0.0 || 2.0 * t > 1.0) return R_NaN; if (t == 0.0) return ACT_D__1; return ACT_D_exp(ncp * t / (1.0 - 2.0 * t) - df/2.0 * log1p(-2.0 * t)); } actuar/src/names.c0000644000176200001440000003043115147745722013606 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Table of functions internal to the package. First element is an * argument to one of actuar_do_dpq or actuar_do_random, functions * callable from .External(); second element is the C function * actually called; third element is a code used in the latter. * * Idea taken from R sources (see src/main/names.c). 
* * AUTHOR: Vincent Goulet */ #include #include "actuar.h" /* DENSITY, CUMULATIVE PROBABILITY AND QUANTILE FUNCTIONS, * RAW AND LIMITED MOMENTS */ dpq_tab_struct dpq_tab[] = { /* One parameter distributions */ {"mexp", actuar_do_dpq1, 1}, {"dinvexp", actuar_do_dpq1, 2}, {"pinvexp", actuar_do_dpq1, 3}, {"qinvexp", actuar_do_dpq1, 4}, {"minvexp", actuar_do_dpq1, 5}, {"mgfexp", actuar_do_dpq1, 6}, {"dlogarithmic", actuar_do_dpq1, 101}, {"plogarithmic", actuar_do_dpq1, 102}, {"qlogarithmic", actuar_do_dpq1, 103}, {"dztpois", actuar_do_dpq1, 104}, {"pztpois", actuar_do_dpq1, 105}, {"qztpois", actuar_do_dpq1, 106}, {"dztgeom", actuar_do_dpq1, 107}, {"pztgeom", actuar_do_dpq1, 108}, {"qztgeom", actuar_do_dpq1, 109}, /* Two parameter distributions */ {"mgamma", actuar_do_dpq2, 1}, {"dinvgamma", actuar_do_dpq2, 2}, {"pinvgamma", actuar_do_dpq2, 3}, {"qinvgamma", actuar_do_dpq2, 4}, {"minvgamma", actuar_do_dpq2, 5}, {"dinvparalogis", actuar_do_dpq2, 6}, {"pinvparalogis", actuar_do_dpq2, 7}, {"qinvparalogis", actuar_do_dpq2, 8}, {"minvparalogis", actuar_do_dpq2, 9}, {"dinvpareto", actuar_do_dpq2, 10}, {"pinvpareto", actuar_do_dpq2, 11}, {"qinvpareto", actuar_do_dpq2, 12}, {"minvpareto", actuar_do_dpq2, 13}, {"dinvweibull", actuar_do_dpq2, 14}, {"pinvweibull", actuar_do_dpq2, 15}, {"qinvweibull", actuar_do_dpq2, 16}, {"minvweibull", actuar_do_dpq2, 17}, {"dlgamma", actuar_do_dpq2, 18}, {"plgamma", actuar_do_dpq2, 19}, {"qlgamma", actuar_do_dpq2, 20}, {"mlgamma", actuar_do_dpq2, 21}, {"dllogis", actuar_do_dpq2, 22}, {"pllogis", actuar_do_dpq2, 23}, {"qllogis", actuar_do_dpq2, 24}, {"mllogis", actuar_do_dpq2, 25}, {"mlnorm", actuar_do_dpq2, 26}, {"dparalogis", actuar_do_dpq2, 27}, {"pparalogis", actuar_do_dpq2, 28}, {"qparalogis", actuar_do_dpq2, 29}, {"mparalogis", actuar_do_dpq2, 30}, {"dpareto", actuar_do_dpq2, 31}, {"ppareto", actuar_do_dpq2, 32}, {"qpareto", actuar_do_dpq2, 33}, {"mpareto", actuar_do_dpq2, 34}, {"dpareto1", actuar_do_dpq2, 35}, {"ppareto1", actuar_do_dpq2, 
36}, {"qpareto1", actuar_do_dpq2, 37}, {"mpareto1", actuar_do_dpq2, 38}, {"mweibull", actuar_do_dpq2, 39}, {"levexp", actuar_do_dpq2, 40}, {"levinvexp", actuar_do_dpq2, 41}, {"mbeta", actuar_do_dpq2, 42}, {"mgfgamma", actuar_do_dpq2, 43}, {"mgfnorm", actuar_do_dpq2, 44}, {"mgfunif", actuar_do_dpq2, 45}, {"mgfinvgamma", actuar_do_dpq2, 46}, {"mnorm", actuar_do_dpq2, 47}, {"mchisq", actuar_do_dpq2, 48}, {"mgfchisq", actuar_do_dpq2, 49}, /* {"minvGauss", actuar_do_dpq2, 50}, [defunct v3.0-0] */ /* {"mgfinvGauss", actuar_do_dpq2, 51}, [defunct v3.0-0] */ {"munif", actuar_do_dpq2, 52}, {"dgumbel", actuar_do_dpq2, 53}, {"pgumbel", actuar_do_dpq2, 54}, {"qgumbel", actuar_do_dpq2, 55}, {"mgumbel", actuar_do_dpq2, 56}, {"mgfgumbel", actuar_do_dpq2, 57}, {"dinvgauss", actuar_do_dpq2, 58}, {"pinvgauss", actuar_do_dpq2, 59}, {"qinvgauss", actuar_do_dpq2, 60}, {"minvgauss", actuar_do_dpq2, 61}, {"mgfinvgauss", actuar_do_dpq2, 62}, {"dztnbinom", actuar_do_dpq2, 101}, {"pztnbinom", actuar_do_dpq2, 102}, {"qztnbinom", actuar_do_dpq2, 103}, {"dztbinom", actuar_do_dpq2, 104}, {"pztbinom", actuar_do_dpq2, 105}, {"qztbinom", actuar_do_dpq2, 106}, {"dzmlogarithmic", actuar_do_dpq2, 107}, {"pzmlogarithmic", actuar_do_dpq2, 108}, {"qzmlogarithmic", actuar_do_dpq2, 109}, {"dzmpois", actuar_do_dpq2, 110}, {"pzmpois", actuar_do_dpq2, 111}, {"qzmpois", actuar_do_dpq2, 112}, {"dzmgeom", actuar_do_dpq2, 113}, {"pzmgeom", actuar_do_dpq2, 114}, {"qzmgeom", actuar_do_dpq2, 115}, {"dpoisinvgauss", actuar_do_dpq2, 116}, {"ppoisinvgauss", actuar_do_dpq2, 117}, {"qpoisinvgauss", actuar_do_dpq2, 118}, /* Three parameter distributions */ {"dburr", actuar_do_dpq3, 1}, {"pburr", actuar_do_dpq3, 2}, {"qburr", actuar_do_dpq3, 3}, {"mburr", actuar_do_dpq3, 4}, {"dgenpareto", actuar_do_dpq3, 5}, {"pgenpareto", actuar_do_dpq3, 6}, {"qgenpareto", actuar_do_dpq3, 7}, {"mgenpareto", actuar_do_dpq3, 8}, {"dinvburr", actuar_do_dpq3, 9}, {"pinvburr", actuar_do_dpq3, 10}, {"qinvburr", actuar_do_dpq3, 11}, 
{"minvburr", actuar_do_dpq3, 12}, {"dinvtrgamma", actuar_do_dpq3, 13}, {"pinvtrgamma", actuar_do_dpq3, 14}, {"qinvtrgamma", actuar_do_dpq3, 15}, {"minvtrgamma", actuar_do_dpq3, 16}, {"dtrgamma", actuar_do_dpq3, 17}, {"ptrgamma", actuar_do_dpq3, 18}, {"qtrgamma", actuar_do_dpq3, 19}, {"mtrgamma", actuar_do_dpq3, 20}, {"levgamma", actuar_do_dpq3, 21}, {"levinvgamma", actuar_do_dpq3, 22}, {"levinvparalogis", actuar_do_dpq3, 23}, {"levinvpareto", actuar_do_dpq3, 24}, {"levinvweibull", actuar_do_dpq3, 25}, {"levlgamma", actuar_do_dpq3, 26}, {"levllogis", actuar_do_dpq3, 27}, {"levlnorm", actuar_do_dpq3, 28}, {"levparalogis", actuar_do_dpq3, 29}, {"levpareto", actuar_do_dpq3, 30}, {"levpareto1", actuar_do_dpq3, 31}, {"levweibull", actuar_do_dpq3, 32}, {"levbeta", actuar_do_dpq3, 33}, {"levchisq", actuar_do_dpq3, 34}, /* {"levinvGauss", actuar_do_dpq3, 35}, [defunct v3.0-0] */ {"levunif", actuar_do_dpq3, 36}, {"levinvgauss", actuar_do_dpq3, 37}, {"dpareto2", actuar_do_dpq3, 38}, {"ppareto2", actuar_do_dpq3, 39}, {"qpareto2", actuar_do_dpq3, 40}, {"mpareto2", actuar_do_dpq3, 41}, {"dpareto3", actuar_do_dpq3, 42}, {"ppareto3", actuar_do_dpq3, 43}, {"qpareto3", actuar_do_dpq3, 44}, {"mpareto3", actuar_do_dpq3, 45}, {"dzmnbinom", actuar_do_dpq3, 101}, {"pzmnbinom", actuar_do_dpq3, 102}, {"qzmnbinom", actuar_do_dpq3, 103}, {"dzmbinom", actuar_do_dpq3, 104}, {"pzmbinom", actuar_do_dpq3, 105}, {"qzmbinom", actuar_do_dpq3, 106}, /* Four parameter distributions */ {"dtrbeta", actuar_do_dpq4, 1}, {"ptrbeta", actuar_do_dpq4, 2}, {"qtrbeta", actuar_do_dpq4, 3}, {"mtrbeta", actuar_do_dpq4, 4}, {"levburr", actuar_do_dpq4, 5}, {"levgenpareto", actuar_do_dpq4, 6}, {"levinvburr", actuar_do_dpq4, 7}, {"levinvtrgamma", actuar_do_dpq4, 8}, {"levtrgamma", actuar_do_dpq4, 9}, {"dgenbeta", actuar_do_dpq4, 10}, {"pgenbeta", actuar_do_dpq4, 11}, {"qgenbeta", actuar_do_dpq4, 12}, {"mgenbeta", actuar_do_dpq4, 13}, {"levpareto2", actuar_do_dpq4, 14}, {"levpareto3", actuar_do_dpq4, 15}, {"dpareto4", 
actuar_do_dpq4, 16}, {"ppareto4", actuar_do_dpq4, 17}, {"qpareto4", actuar_do_dpq4, 18}, {"mpareto4", actuar_do_dpq4, 19}, /* Five parameter distributions */ {"levtrbeta", actuar_do_dpq5, 1}, {"levgenbeta", actuar_do_dpq5, 2}, {"dfpareto", actuar_do_dpq5, 3}, {"pfpareto", actuar_do_dpq5, 4}, {"qfpareto", actuar_do_dpq5, 5}, {"mfpareto", actuar_do_dpq5, 6}, {"levpareto4", actuar_do_dpq5, 7}, /* Six parameter distributions */ {"levfpareto", actuar_do_dpq6, 1}, /* Phase-type distributions */ {"dphtype", actuar_do_dpqphtype2, 1}, {"pphtype", actuar_do_dpqphtype2, 2}, {"mphtype", actuar_do_dpqphtype2, 3}, {"mgfphtype", actuar_do_dpqphtype2, 4}, {0, 0, 0} }; /* RANDOM NUMBERS FUNCTIONS */ random_tab_struct random_tab[] = { /* One parameter distributions */ {"rinvexp", actuar_do_random1, 1, REALSXP}, {"rlogarithmic", actuar_do_random1, 101, INTSXP}, {"rztpois", actuar_do_random1, 102, INTSXP}, {"rztgeom", actuar_do_random1, 103, INTSXP}, /* Two parameter distributions */ {"rinvgamma", actuar_do_random2, 1, REALSXP}, {"rinvparalogis", actuar_do_random2, 2, REALSXP}, {"rinvpareto", actuar_do_random2, 3, REALSXP}, {"rinvweibull", actuar_do_random2, 4, REALSXP}, {"rlgamma", actuar_do_random2, 5, REALSXP}, {"rllogis", actuar_do_random2, 6, REALSXP}, {"rparalogis", actuar_do_random2, 7, REALSXP}, {"rpareto", actuar_do_random2, 8, REALSXP}, {"rpareto1", actuar_do_random2, 9, REALSXP}, {"rgumbel", actuar_do_random2, 10, REALSXP}, {"rinvgauss", actuar_do_random2, 11, REALSXP}, {"rztnbinom", actuar_do_random2, 101, INTSXP}, {"rztbinom", actuar_do_random2, 102, INTSXP}, {"rzmlogarithmic", actuar_do_random2, 103, INTSXP}, {"rzmpois", actuar_do_random2, 104, INTSXP}, {"rzmgeom", actuar_do_random2, 105, INTSXP}, {"rpoisinvgauss", actuar_do_random2, 106, INTSXP}, /* Three parameter distributions */ {"rburr", actuar_do_random3, 1, REALSXP}, {"rgenpareto", actuar_do_random3, 2, REALSXP}, {"rinvburr", actuar_do_random3, 3, REALSXP}, {"rinvtrgamma", actuar_do_random3, 4, REALSXP}, 
{"rtrgamma", actuar_do_random3, 5, REALSXP}, {"rpareto2", actuar_do_random3, 6, REALSXP}, {"rpareto3", actuar_do_random3, 7, REALSXP}, {"rzmnbinom", actuar_do_random3, 101, INTSXP}, {"rzmbinom", actuar_do_random3, 102, INTSXP}, /* Four parameter distributions */ {"rtrbeta", actuar_do_random4, 1, REALSXP}, {"rgenbeta", actuar_do_random4, 2, REALSXP}, {"rpareto4", actuar_do_random4, 3, REALSXP}, /* Five parameter distributions */ {"rfpareto", actuar_do_random5, 1, REALSXP}, /* Phase-type distributions */ {"rphtype", actuar_do_randomphtype2, 1, REALSXP}, {0, 0, 0} }; actuar/src/pareto2.c0000644000176200001440000001225015147745722014056 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Pareto (type) II distribution. See ../R/Pareto2.R for * details. * * We work with the density expressed as * * shape * u^shape * (1 - u) / (x - min) * * with u = 1/(1 + v), v = (x - min)/scale. 
* * AUTHOR: Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dpareto2(double x, double min, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return x + min + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < min) return ACT_D__0; /* handle x == min separately */ if (x == min) return ACT_D_val(shape / scale); double logv, logu, log1mu; logv = log(x - min) - log(scale); logu = - log1pexp(logv); log1mu = - log1pexp(-logv); return ACT_D_exp(log(shape) + shape * logu + log1mu - log(x - min)); } double ppareto2(double q, double min, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return q + min + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (q <= min) return ACT_DT_0; double u = exp(-log1pexp(log(q - min) - log(scale))); return ACT_DT_Cval(R_pow(u, shape)); } double qpareto2(double p, double min, double shape, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return p + min + shape + scale; #endif if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return min + scale * (R_pow(ACT_D_Cval(p), -1.0/shape) - 1.0); } double rpareto2(double min, double shape, double scale) { if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || shape <= 0.0 || scale <= 0.0) return R_NaN; return min + scale * (R_pow(unif_rand(), -1.0/shape) - 1.0); } double mpareto2(double order, double min, double shape, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(min) || ISNAN(shape) || ISNAN(scale)) return order + shape + scale; #endif if 
(!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; /* The case min = 0 is a Pareto with a larger range of admissible * values for order: -1 < order < shape. */ if (min == 0.0) return mpareto(order, shape, scale, give_log); /* From now on min != 0 and order must be a stricly non negative * integer < shape. */ if (order < 0.0) return R_NaN; if (order >= shape) return R_PosInf; int i; double order0 = order; double sum, r = scale/min; double Ga = gammafn(shape); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = Ga; /* first term in the sum */ for (i = 1; i <= order; i++) { sum += choose(order, i) * R_pow(r, i) * gammafn(1.0 + i) * gammafn(shape - i); } return R_pow(min, order) * sum / Ga; } double levpareto2(double limit, double min, double shape, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(min) || ISNAN(shape) || ISNAN(scale) || ISNAN(order)) return limit + min + shape + scale + order; #endif if (!R_FINITE(min) || !R_FINITE(shape) || !R_FINITE(scale) || !R_FINITE(order) || shape <= 0.0 || scale <= 0.0) return R_NaN; if (limit <= min) return 0.0; /* The case min = 0 is a Pareto with a larger range of admissible * values for order: order > -1. */ if (min == 0.0) return levpareto(limit, shape, scale, order, give_log); /* From now on min != 0 and order must be a stricly non negative * integer. 
*/ if (order < 0.0) return R_NaN; int i; double order0 = order; double logv, u, u1m; double sum, r = scale / min; logv = log(limit - min) - log(scale); u = exp(-log1pexp(logv)); u1m = exp(-log1pexp(-logv)); if (ACT_nonint(order)) { order = ACT_forceint(order); warning(_("'order' (%.2f) must be integer, rounded to %.0f"), order0, order); } sum = betaint_raw(u1m, 1.0, shape, u); /* first term in the sum */ for (i = 1; i <= order; i++) { sum += choose(order, i) * R_pow(r, i) * betaint_raw(u1m, 1.0 + i, shape - i, u); } return R_pow(min, order) * sum / gammafn(shape) + ACT_DLIM__0(limit, order) * R_pow(u, shape); } actuar/src/invgauss.c0000644000176200001440000002345015151206331014324 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the inverse gaussian distribution. See ../R/InverseGaussian.R * for details. * * We work with the density expressed as * * (2 pi phi x^3)^(-1/2) exp(- u^2/(2 phi x)) * * with u = (x - mu)/mu. * * The code for functions [dpqr]invgauss() is a C implementation of * functions of the same functions in package statmod; see: * * Giner, G. and Smyth, G. K. (2016), "statmod: Probability * Calculations for the Inverse Gaussian Distribution", R * Journal, vol. 8, no 1, p. 339-351. * https://journal.r-project.org/archive/2016-1/giner-smyth.pdf * * AUTHOR (original R implementation): Gordon Smyth * AUTHOR (C implementation): Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dinvgauss(double x, double mu, double phi, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(mu) || ISNAN(phi)) return x + mu + phi; #endif if (mu <= 0.0) return R_NaN; if (phi <= 0) { if (phi < 0) return R_NaN; /* phi == 0 */ return (x == 0.0) ? 
R_PosInf : ACT_D__0; } if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* limiting case phi = Inf: spike at zero */ if (x == 0) return R_FINITE(phi) ? ACT_D__0 : R_PosInf; /* limiting case mu = Inf: inverse chi-square distribution [a.k.a * inverse gamma with shape = 1/2, scale = 1/(2 * phi)] */ if (!R_FINITE(mu)) return ACT_D_exp(-(log(phi) + 3 * log(x) + 1/phi/x)/2 - M_LN_SQRT_2PI); /* standard cases */ x = x/mu; phi = phi * mu; return ACT_D_exp(-(log(phi) + 3 * log(x) + R_pow_di(x - 1, 2)/phi/x)/2 - M_LN_SQRT_2PI - log(mu)); } /* This is used in pinvgauss() for limit cases. */ #define PSMALLCV2 1e-14 #define RIGHTLARGEQUANTILE 1e6 #define RIGHTLARGEQUANTILEMOD 5e5 double pinvgauss(double q, double mu, double phi, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(mu) || ISNAN(phi)) return q + mu + phi; #endif if (mu <= 0.0) return R_NaN; if (phi <= 0) { if (phi < 0) return R_NaN; /* phi == 0 : */ return (q == 0) ? ACT_DT_0 : ACT_DT_1; } if (q < 0) return ACT_DT_0; /* limiting case phi = Inf */ if (q == 0) return R_FINITE(phi) ? ACT_DT_0 : ACT_DT_1; if (!R_FINITE(q)) return ACT_DT_1; /* limiting case mu = Inf */ if (!R_FINITE(mu)) return pchisq(1/q/phi, 1, !lower_tail, log_p); /* standard cases */ double qm = q/mu; double phim = phi * mu; /* gamma approximation when the coefficient of variation phi * mu is very small */ if (phim < PSMALLCV2) return ACT_D_exp(pgamma(q, 1/phim, phim * mu, /* l._t. */1, /* log_p */1)); /* approximation for (survival) probabilities in the far right tail */ if (!lower_tail && qm > RIGHTLARGEQUANTILE) { double r = qm/2/phim; if (r > RIGHTLARGEQUANTILEMOD) return ACT_D_exp(1/phim - M_LN_SQRT_PI - log(2*phim) - 1.5 * log1p(r) - r); } /* all other probabilities */ double r = sqrt(q * phi); double a = pnorm((qm - 1)/r, 0, 1, lower_tail, /* log_p */1); double b = 2/phim + pnorm(-(qm + 1)/r, 0, 1, /* l._t. */1, /* log_p */1); return ACT_D_exp(a + (lower_tail ? 
log1p(exp(b - a)) : log1mexp(a - b))); } /* This is used in nrstep() to return either dx or -dx. */ #define ACT_S_val(x) (lower_tail ? x : -x) /* Needed by qinvgauss() for Newton-Raphson iterations. */ double nrstep(double x, double p, double logp, double phi, int lower_tail) { double logF = pinvgauss(x, 1, phi, lower_tail, /*log.p*/1); double dlogp = logp - logF; return ACT_S_val(((fabs(dlogp) < 1e-5) ? dlogp * exp(logp + log1p(-dlogp/2)) : p - exp(logF)) / dinvgauss(x, 1, phi, 0)); } /* This is used in qinvgauss() for limit cases. */ #define QSMALLCV2 1e-8 #define LARGEKAPPA 1e3 #define LEFTSMALLPROBLOG -11.51 #define RIGHTSMALLPROBLOG -1e-5 double qinvgauss(double p, double mu, double phi, int lower_tail, int log_p, double tol, int maxit, int echo) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(mu) || ISNAN(phi)) return p + mu + phi; #endif if (mu <= 0.0 || phi <= 0.0) return R_NaN; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return 1.0; /* limiting case mu = Inf */ if (!R_FINITE(mu)) return 1/phi/qchisq(p, 1, !lower_tail, log_p); ACT_Q_P01_boundaries(p, 0, R_PosInf); /* must be able to do at least one iteration */ if (maxit < 1) error(_("maximum number of iterations must be at least 1")); int i = 1; double logp, kappa, mode, x, dx, s; /* make sure we have both p and log(p) for the sequel */ if (log_p) { logp = p; p = exp(p); } else logp = log(p); /* convert to mean = 1 */ phi *= mu; /* gamma approximation when the coefficient of variation phi * mu is very small; here we have mean = 1 */ if (phi < QSMALLCV2) return mu * qgamma(logp, 1/phi, phi, /* l._t. */1, /* log_p */1); /* mode */ kappa = 1.5 * phi; if (kappa <= LARGEKAPPA) mode = sqrt(1 + kappa * kappa) - kappa; else /* Taylor series correction */ { double k = 1.0/2.0/kappa; mode = k * (1 - k * k); } /* starting value: inverse chi squared for small left tail prob; * qgamma for small right tail prob; mode otherwise */ if (logp < LEFTSMALLPROBLOG) x = lower_tail ? 
1/phi/R_pow_di(qnorm(logp, 0, 1, lower_tail, 1), 2) : fmax2(mode, qgamma(logp, 1/phi, phi, lower_tail, 1)); else if (logp > RIGHTSMALLPROBLOG) x = lower_tail ? qgamma(logp, 1/phi, phi, lower_tail, 1) : fmax2(mode, 1/phi/R_pow_di(qnorm(logp, 0, 1, lower_tail, 1), 2)); else x = mode; /* if echoing iterations, start by printing the header and the * first value */ if (echo) Rprintf("iter\tadjustment\tquantile\n%d\t ---- \t%.8g\n", 0, x); /* first Newton-Raphson outside the loop to retain the sign of * the adjustment */ dx = nrstep(x, p, logp, phi, lower_tail); s = sign(dx); x += dx; if (echo) Rprintf("%d\t%-14.8g\t%.8g\n", i, dx, x); /* now do the iterations */ do { i++; if (i > maxit) { warning(_("maximum number of iterations reached before obtaining convergence")); break; } dx = nrstep(x, p, logp, phi, lower_tail); /* change of sign indicates that machine precision has been overstepped */ if (dx * s < 0) dx = 0; else x += dx; if (echo) Rprintf("%d\t%-14.8g\t%.8g\n", i, dx, x); } while (fabs(dx) > tol); return x * mu; } /* This is used in rinvgauss() as the threshold for a "large" value of * y * phi. */ #define YPHILARGE 5e5 double rinvgauss(double mu, double phi) { if (mu <= 0.0 || phi <= 0.0) return R_NaN; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return 0.0; /* limiting case mu = Inf */ if (!R_FINITE(mu)) return 1/phi/rchisq(1); /* generate y and convert to mean = 1 */ double yphi = R_pow_di(rnorm(0, 1), 2) * phi * mu; /* Taylor series is more accurate when y * phi is large */ double x; if (yphi > YPHILARGE) x = 1/yphi; else x = 1 + yphi/2 * (1 - sqrt(1 + 4/yphi)); return mu * ((unif_rand() <= 1/(1 + x)) ? 
x : 1/x); } double minvgauss(double order, double mu, double phi, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(mu) || ISNAN(phi)) return order + mu + phi; #endif if (mu <= 0.0 || phi <= 0.0 || order < 0 || ACT_nonint(order)) return R_NaN; /* trivial case */ if (order == 0.0) return 0.0; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return 0.0; /* limiting case mu = Inf */ /* [no finite strictly positive, integer moments] */ if (!R_FINITE(mu)) return R_PosInf; int i, k = order; double term, s, phir = phi * mu/2; s = term = 1.0; /* first term (i = 0) */ for (i = 1; i < k; i++) { term *= ((k + i - 1) * (k - i)/i) * phir; s += term; } return R_pow_di(mu, k) * s; } /* The lev function is very similar to the pdf. It can be written as * * levinvgauss(x; mu, phi) = mu [pnorm((xm - 1)/r) * - exp(2/phim) pnorm(-(xm + 1)/r)] * + x (1 - pinvgauss(x; mu, phi) * * where xm = x/mu, phim = phi * mu, r = sqrt(x * phi). */ double levinvgauss(double limit, double mu, double phi, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(mu) || ISNAN(phi) || ISNAN(order)) return limit + mu + phi + order; #endif if (mu <= 0.0 || phi < 0.0 || order != 1.0) return R_NaN; if (limit <= 0.0 || !R_FINITE(phi)) return 0.0; if (!R_FINITE(limit) || !R_FINITE(mu)) return mu; /* calculations very similar to those in pinvgauss(); we do * everything here and avoid calling the latter */ double xm = limit/mu, phim = phi * mu, r = sqrt(limit * phi); double x = (xm - 1)/r; double a = pnorm(x, 0, 1, /*l._t.*/1, /* log_p */1); double ap = pnorm(x, 0, 1, /*l._t.*/0, /* log_p */1); double b = 2/phim + pnorm(-(xm + 1)/r, 0, 1, /* l._t. 
*/1, /* log_p */1); return mu * exp(a + log1mexp(a - b)) + limit * exp(ap + log1mexp(ap - b)); } double mgfinvgauss(double t, double mu, double phi, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(mu) || ISNAN(phi)) return t + mu + phi; #endif if (mu <= 0.0 || phi < 0.0 || t > 1/phi/(2.0 * mu * mu)) return R_NaN; /* trivial case */ if (t == 0.0) return ACT_D__1; /* limiting case phi = Inf */ if (!R_FINITE(phi)) return ACT_D__0; /* limiting case mu = Inf */ if (!R_FINITE(mu)) return R_PosInf; /* convert to mean = 1 */ phi *= mu; t *= mu; return ACT_D_exp((1 - sqrt(1 - 2 * phi * t))/phi); } actuar/src/lnorm.c0000644000176200001440000000256315147745722013637 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Fonctions to calculate raw and limited moments for the lognormal * distribution. See ../R/LognormalMoments.R for details. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double mlnorm(double order, double logmean, double logsd, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(logmean) || ISNAN(logsd)) return order + logmean + logsd; #endif if (!R_FINITE(logmean) || !R_FINITE(logsd) || !R_FINITE(order) || logsd <= 0.0) return R_NaN; return exp(order * (logmean + 0.5 * order * R_pow_di(logsd, 2))); } double levlnorm(double limit, double logmean, double logsd, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(logmean) || ISNAN(logsd) || ISNAN(order)) return limit + logmean + logsd + order; #endif if (!R_FINITE(logmean) || !R_FINITE(logsd) || !R_FINITE(order) || logsd <= 0.0) return R_NaN; if (limit <= 0.0) return 0.0; double u = (log(limit) - logmean)/logsd; return exp(order * (logmean + 0.5 * order * R_pow(logsd, 2.0))) * pnorm(u - order * logsd, 0., 1.0, 1, 0) + ACT_DLIM__0(limit, order) * pnorm(u, 0., 1.0, 0, 0); } actuar/src/genpareto.c0000644000176200001440000001147315147745722014474 0ustar liggesusers/* actuar: Actuarial Functions and 
Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Generalized Pareto distribution.. See ../R/GeneralizedPareto.R * for details. * * We work with the density expressed as * * u^shape2 * (1 - u)^shape1 / (x * beta(shape1, shape2)) * * with u = v/(1 + v) = 1/(1 + 1/v), v = x/scale. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dgenpareto(double x, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return x + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (!R_FINITE(x) || x < 0.0) return ACT_D__0; /* handle x == 0 separately */ if (x == 0.0) { if (shape2 < 1) return R_PosInf; if (shape2 > 1) return ACT_D__0; /* else */ return give_log ? 
- log(scale) - lbeta(shape2, shape1) : 1.0/(scale * beta(shape2, shape1));
    }

    /* general case: log-density evaluated through the substitution
     * u = v/(1 + v), v = x/scale, computed in log space via
     * log1pexp() for accuracy (see file header) */
    double logv, logu, log1mu;

    logv = log(x) - log(scale);
    logu = - log1pexp(-logv);
    log1mu = - log1pexp(logv);

    return ACT_D_exp(shape2 * logu + shape1 * log1mu
		     - log(x) - lbeta(shape2, shape1));
}

/* Distribution function of the generalized Pareto: F(q) is a beta
 * distribution function evaluated at u = v/(1 + v), v = q/scale. */
double pgenpareto(double q, double shape1, double shape2, double scale,
		  int lower_tail, int log_p)
{
#ifdef IEEE_754
    if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale))
	return q + shape1 + shape2 + scale;
#endif
    if (!R_FINITE(shape1) ||
	!R_FINITE(shape2) ||
	shape1 <= 0.0 ||
	shape2 <= 0.0 ||
	scale <= 0.0)
	return R_NaN;

    if (q <= 0)
	return ACT_DT_0;

    double logvm, u;

    logvm = log(scale) - log(q);	/* -log v */
    u = exp(-log1pexp(logvm));

    if (u > 0.5)
    {
	/* Compute (1 - u) accurately */
	double u1m = exp(-log1pexp(-logvm));
	/* evaluate with swapped shapes and flipped tail so pbeta()
	 * works near its accurate end */
	return pbeta(u1m, shape1, shape2, 1 - lower_tail, log_p);
    }

    /* else u <= 0.5 */
    return pbeta(u, shape2, shape1, lower_tail, log_p);
}

/* Quantile function: invert u = v/(1 + v) after qbeta(); only the
 * log_p conversion is done here, lower_tail is forwarded to qbeta() */
double qgenpareto(double p, double shape1, double shape2, double scale,
		  int lower_tail, int log_p)
{
#ifdef IEEE_754
    if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale))
	return p + shape1 + shape2 + scale;
#endif
    if (!R_FINITE(shape1) ||
	!R_FINITE(shape2) ||
	!R_FINITE(scale) ||
	shape1 <= 0.0 ||
	shape2 <= 0.0 ||
	scale <= 0.0)
	return R_NaN;

    ACT_Q_P01_boundaries(p, 0, R_PosInf);
    p = ACT_D_qIv(p);

    return scale / (1.0 / qbeta(p, shape2, shape1, lower_tail, 0) - 1.0);
}

/* Random generation by inversion of u = v/(1 + v) applied to a beta
 * variate. */
double rgenpareto(double shape1, double shape2, double scale)
{
    if (!R_FINITE(shape1) ||
	!R_FINITE(shape2) ||
	!R_FINITE(scale) ||
	shape1 <= 0.0 ||
	shape2 <= 0.0 ||
	scale <= 0.0)
	return R_NaN;

    return scale / (1.0 / rbeta(shape2, shape1) - 1.0);
}

/* Raw moments of the generalized Pareto distribution. */
double mgenpareto(double order, double shape1, double shape2, double scale,
		  int give_log)
{
#ifdef IEEE_754
    if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale))
	return order + shape1 + shape2 + scale;
#endif
    if (!R_FINITE(shape1) ||
	!R_FINITE(shape2) ||
	!R_FINITE(scale) ||
	!R_FINITE(order) ||
	shape1 <= 0.0 || shape2 <=
0.0 || scale <= 0.0) return R_NaN; if (order <= -shape2 || order >= shape1) return R_PosInf; return R_pow(scale, order) * beta(shape1 - order, shape2 + order) / beta(shape1, shape2); } double levgenpareto(double limit, double shape1, double shape2, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= -shape2) return R_PosInf; if (limit <= 0.0) return 0.0; double logv, u, u1m, Ix; logv = log(limit) - log(scale); u = exp(-log1pexp(-logv)); u1m = exp(-log1pexp(logv)); Ix = (u > 0.5) ? pbeta(u1m, shape1, shape2, /*l._t.*/1, /*give_log*/0) : pbeta(u, shape2, shape1, /*l._t.*/0, /*give_log*/0); return R_pow(scale, order) * betaint_raw(u, shape2 + order, shape1 - order, u1m) / (gammafn(shape1) * gammafn(shape2)) + ACT_DLIM__0(limit, order) * Ix; } actuar/src/ztnbinom.c0000644000176200001440000000743215151206331014327 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-truncated negative binomial distribution. See * ../R/ZeroTruncatedNegativeBinomial.R for details. * * Let X ~ NegativeBinomial(size, prob). The probability mass function of the * zero-truncated Negative Binomial random variable Z is * * Pr[Z = 0] = 0 * Pr[Z = x] = Pr[X = x]/(1 - prob^size), x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = (Pr[X <= x] - prob^size)/(1 - prob^size) * * Limiting cases: * * 1. size == 0 is Logarithmic(1 - prob) (according to the standard * parametrization of the logarithmic distribution used by * {d,p,q,r}logarithmic(); * 2. prob == 1 is point mass at x = 1. 
* * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dztnbinom(double x, double size, double prob, int give_log) { /* We compute Pr[X = 0] with dbinom_raw() [as would eventually * dnbinom()] to take advantage of all the optimizations for * small/large values of 'prob' and 'size' (and also to skip some * validity tests). */ #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob)) return x + size + prob; #endif if (prob <= 0 || prob > 1 || size < 0) return R_NaN; if (x < 1 || !R_FINITE(x)) return ACT_D__0; /* limiting case as size -> 0 is logarithmic */ if (size == 0) return dlogarithmic(x, 1 - prob, give_log); /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return (x == 1) ? ACT_D__1 : ACT_D__0; double lp0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/1); return ACT_D_val(dnbinom(x, size, prob, /*give_log*/0)/(-expm1(lp0))); } double pztnbinom(double x, double size, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob)) return x + size + prob; #endif if (prob <= 0 || prob > 1 || size < 0) return R_NaN; if (x < 1) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; /* limiting case as size -> 0 is logarithmic */ if (size == 0) return plogarithmic(x, 1 - prob, lower_tail, log_p); /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return (x >= 1) ? 
ACT_DT_1 : ACT_DT_0; double lp0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/1); return ACT_DT_Cval(pnbinom(x, size, prob, /*l._t.*/0, /*log_p*/0)/(-expm1(lp0))); } double qztnbinom(double p, double size, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(size) || ISNAN(prob)) return p + size + prob; #endif if (prob <= 0 || prob > 1 || size < 0) return R_NaN; /* limiting case as size -> 0 is logarithmic */ if (size == 0) return qlogarithmic(p, 1 - prob, lower_tail, log_p); ACT_Q_P01_check(p); /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return 1.0; if (p == ACT_DT_0) return 1.0; if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); double p0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/0); return qnbinom(p0 + (1 - p0) * p, size, prob, /*l._t.*/1, /*log_p*/0); } double rztnbinom(double size, double prob) { if (!R_FINITE(prob) || prob <= 0 || prob > 1 || size < 0) return R_NaN; /* limiting case as size -> 0 is logarithmic */ if (size == 0) return rlogarithmic(1 - prob); /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return 1.0; double p0 = dbinom_raw(size, size, prob, 1 - prob, /*give_log*/0); return qnbinom(runif(p0, 1), size, prob, /*l._t.*/1, /*log_p*/0); } actuar/src/ztgeom.c0000644000176200001440000000450315151206331013770 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-truncated geometric distribution. See * ../R/ZeroTruncatedGeometric.R for details. * * Let X ~ Geometric(prob). The probability mass function of the * zero-truncated Geometric random variable Z is * * Pr[Z = 0] = 0 * Pr[Z = x] = Pr[X = x]/(1 - prob), x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = (Pr[X <= x] - prob)/(1 - prob) * * Limiting case: prob == 1 is point mass at x = 1. 
* * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dztgeom(double x, double prob, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob)) return x + prob; #endif if (prob <= 0 || prob > 1) return R_NaN; if (x < 1 || !R_FINITE(x)) return ACT_D__0; /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return (x == 1) ? ACT_D__1 : ACT_D__0; return ACT_D_val(dgeom(x - 1, prob, /*give_log*/0)); } double pztgeom(double x, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(prob)) return x + prob; #endif if (prob <= 0 || prob > 1) return R_NaN; if (x < 1) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return (x >= 1) ? ACT_DT_1 : ACT_DT_0; return ACT_DT_Cval(pgeom(x - 1, prob, /*l._t.*/0, /*log_p*/0)); } double qztgeom(double p, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(prob)) return p + prob; #endif if (prob <= 0 || prob > 1) return R_NaN; ACT_Q_P01_check(p); /* limiting case as prob -> 1 is point mass at one */ if (prob == 1) return 1.0; if (p == ACT_DT_0) return 1.0; if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); return 1 + qgeom(p, prob, /*l._t.*/1, /*log_p*/0); } double rztgeom(double prob) { if (!R_FINITE(prob) || prob <= 0 || prob > 1) return R_NaN; /* limiting case as p -> 1 is point mass at one */ if (prob == 1) return 1.0; return 1 + rpois(exp_rand() * ((1 - prob) / prob)); } actuar/src/gumbel.c0000644000176200001440000000626615147745722013767 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the Gumbel distribution. See ../R/Gumbel.R for * details. * * We work with the density expressed as * * e^(-u) * e^(-e^(-u)) / scale * * with u = (x - alpha)/scale. 
* * AUTHOR: Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dgumbel(double x, double alpha, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(alpha) || ISNAN(scale)) return x + alpha + scale; #endif if (!R_FINITE(scale)) return ACT_D__0; if (!R_FINITE(x) && alpha == x) return R_NaN; /* x - alpha is NaN */ if (scale <= 0) { if (scale < 0) return R_NaN; /* scale == 0 */ return (x == alpha) ? R_PosInf : ACT_D__0; } x = (x - alpha) / scale; if (!R_FINITE(x)) return ACT_D__0; return ACT_D_exp(-(x + exp(-x) + log(scale))); } double pgumbel(double q, double alpha, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(alpha) || ISNAN(scale)) return q + alpha + scale; #endif if (!R_FINITE(q) && alpha == q) return R_NaN; /* q - alpha is NaN */ if (scale <= 0) { if (scale < 0) return R_NaN; /* scale == 0 : */ return (q < alpha) ? ACT_DT_0 : ACT_DT_1; } double p = (q - alpha) / scale; if (!R_FINITE(p)) return (q < alpha) ? 
ACT_DT_0 : ACT_DT_1; q = p; return ACT_DT_val(exp(-exp(-q))); } double qgumbel(double p, double alpha, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(alpha) || ISNAN(scale)) return p + alpha + scale; #endif if (!R_FINITE(alpha) || !R_FINITE(scale) || scale <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, R_NegInf, R_PosInf); p = ACT_DT_qIv(p); return alpha - scale * log(-log(p)); } double rgumbel(double alpha, double scale) { if (!R_FINITE(alpha) || !R_FINITE(scale) || scale <= 0.0) return R_NaN;; return alpha - scale * log(exp_rand()); } #define EULER_CNST 0.577215664901532860606512090082 double mgumbel(double order, double alpha, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(alpha) || ISNAN(scale)) return order + alpha + scale; #endif if (!R_FINITE(alpha) || !R_FINITE(scale) || !R_FINITE(order) || scale <= 0.0 || order <= 0.0 || order > 2.0) return R_NaN; if (order == 1.0) return alpha + EULER_CNST * scale; if (order == 2.0) return R_pow_di(M_PI * scale, 2)/6 + R_pow_di(alpha + EULER_CNST * scale, 2); return R_NaN; /* order != 1 or 2 */ } double mgfgumbel(double t, double alpha, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(t) || ISNAN(alpha) || ISNAN(scale)) return t + alpha + scale; #endif if (!R_FINITE(alpha) || !R_FINITE(scale) || scale <= 0.0 || scale * t < 1.0) return R_NaN; if (t == 0.0) return ACT_D__1; return ACT_D_exp(alpha * t + lgamma(1 - scale * t)); } actuar/src/dpqphtype.c0000644000176200001440000001654615147745722014534 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability density, cumulative probability * and moment generating functions, and raw moments for phase-type * distributions. This file is based on dpq.c with the following * modifications: * * 1. support for a matrix argument; * 2. no iteration over the parameters; * 3. support for two parameter distributions only; * 4. 
many sanity checks on the arguments that are done in the * {d,p,r,m,mgf} functions for other probability laws are done * here because of item 2 above. * * Note that the "q" in the functions and file names was retained for * symmetry reasons only, since the quantile function is not * otherwise supported. * * For details, see dpq.c. * * AUTHOR: Vincent Goulet */ #include #include #include "actuar.h" #include "locale.h" /* Prototypes of auxiliary functions */ static SEXP dpqphtype2_1(SEXP, SEXP, SEXP, SEXP, double (*f)(double, double *, double *, int, int)); static SEXP dpqphtype2_2(SEXP, SEXP, SEXP, SEXP, SEXP, double (*f)(double, double *, double *, int, int, int)); #define if_NA_dpqphtype2_set(y, x) \ if (ISNA (x) || naargs) y = NA_REAL; \ else if (ISNAN(x) || nanargs) y = R_NaN; \ else if (naflag) y = R_NaN; static SEXP dpqphtype2_1(SEXP sx, SEXP sa, SEXP sb, SEXP sI, double (*f)(double, double *, double *, int, int)) { SEXP sy, bdims; R_xlen_t i, j, ij, n, m; double tmp1, tmp2, *x, *a, *b, *y; /* Flags used in sanity check of arguments. Listed from highest to * lowest priority. 
*/ Rboolean naargs = FALSE, nanargs = FALSE, naflag = FALSE; #define SETUP_DPQPHTYPE2 \ if (!isNumeric(sx) || !isNumeric(sa) || !isMatrix(sb)) \ error(_("invalid arguments")); \ \ n = XLENGTH(sx); \ if (n == 0) \ return(allocVector(REALSXP, 0)); \ \ m = XLENGTH(sa); \ bdims = getAttrib(sb, R_DimSymbol); \ if (INTEGER(bdims)[0] != INTEGER(bdims)[1] || \ INTEGER(bdims)[0] != m) \ naflag = TRUE; \ \ PROTECT(sx = coerceVector(sx, REALSXP)); \ PROTECT(sa = coerceVector(sa, REALSXP)); \ PROTECT(sb = coerceVector(sb, REALSXP)); \ PROTECT(sy = allocVector(REALSXP, n)); \ x = REAL(sx); \ a = REAL(sa); \ b = REAL(sb); \ y = REAL(sy); \ \ tmp1 = 0.0; \ for (i = 0; i < m && !naargs && !nanargs && !naflag; i++) \ { \ if ((naargs = ISNA(a[i]))) \ break; \ if ((nanargs = ISNAN(a[i]))) \ break; \ tmp1 += a[i]; \ tmp2 = 0.0; \ for (j = 0; j < m; j++) \ { \ ij = i + j * m; \ if ((naargs = ISNA(b[ij]))) \ break; \ if ((nanargs = ISNAN(b[ij]))) \ break; \ if (i == j && (naflag = b[ij] >= 0)) \ break; \ if (i != j && (naflag = b[ij] < 0)) \ break; \ tmp2 += b[ij]; \ } \ if (!(naargs || nanargs)) \ naflag = tmp2 > 0; \ } \ if (!(naargs || nanargs)) \ naflag = tmp1 > 1 SETUP_DPQPHTYPE2; int i_1 = asInteger(sI); for (i = 0; i < n; i++) { if_NA_dpqphtype2_set(y[i], x[i]) else { y[i] = f(x[i], a, b, m, i_1); if (ISNAN(y[i])) naflag = TRUE; } } #define FINISH_DPQPHTYPE2 \ if (naflag) \ warning(R_MSG_NA); \ \ SHALLOW_DUPLICATE_ATTRIB(sy, sx); \ UNPROTECT(4) FINISH_DPQPHTYPE2; return sy; } static SEXP dpqphtype2_2(SEXP sx, SEXP sa, SEXP sb, SEXP sI, SEXP sJ, double (*f)(double, double *, double *, int, int, int)) { SEXP sy, bdims; R_xlen_t i, j, ij, n, m; double tmp1, tmp2, *x, *a, *b, *y; /* Flags used in sanity check of arguments. Listed from highest to * lowest priority. 
*/ Rboolean naargs = FALSE, nanargs = FALSE, naflag = FALSE; SETUP_DPQPHTYPE2; int i_1 = asInteger(sI), i_2 = asInteger(sJ); for (i = 0; i < n; i++) { if_NA_dpqphtype2_set(y[i], x[i]) else { y[i] = f(x[i], a, b, m, i_1, i_2); if (ISNAN(y[i])) naflag = TRUE; } } FINISH_DPQPHTYPE2; return sy; } #define DPQPHTYPE2_1(A, FUN) dpqphtype2_1(CAR(A), CADR(A), CADDR(A), CADDDR(A), FUN); #define DPQPHTYPE2_2(A, FUN) dpqphtype2_2(CAR(A), CADR(A), CADDR(A), CADDDR(A), CAD4R(A), FUN) SEXP actuar_do_dpqphtype2(int code, SEXP args) { switch (code) { case 1: return DPQPHTYPE2_1(args, dphtype); case 2: return DPQPHTYPE2_2(args, pphtype); case 3: return DPQPHTYPE2_1(args, mphtype); case 4: return DPQPHTYPE2_1(args, mgfphtype); default: error(_("internal error in actuar_do_dpqphtype2")); } return args; /* never used; to keep -Wall happy */ } /* Main function, the only one used by .External(). */ SEXP actuar_do_dpqphtype(SEXP args) { int i; const char *name; /* Extract distribution name */ args = CDR(args); name = CHAR(STRING_ELT(CAR(args), 0)); /* Dispatch to actuar_do_dpqphtype{1,2,3,4,5} */ for (i = 0; dpq_tab[i].name; i++) if (!strcmp(dpq_tab[i].name, name)) return dpq_tab[i].cfun(dpq_tab[i].code, CDR(args)); /* No dispatch is an error */ error("internal error in actuar_do_dpqphtype"); return args; /* never used; to keep -Wall happy */ } actuar/src/genbeta.c0000644000176200001440000001364115147745722014114 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the generalized beta distribution. See ../R/GeneralizedBeta.R * for details. * * We work with the density expressed as * * shape3 * u^shape1 * (1 - u)^(shape2 - 1) / (x * beta(shape1, shape2)) * * with u = (x/scale)^shape3. * * Code for limiting cases derived from .../src/nmath/dbeta.c from R * sources. 
* * AUTHOR: Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dgenbeta(double x, double shape1, double shape2, double shape3, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return x + shape1 + shape2 + shape3 + scale; #endif if (shape1 < 0.0 || shape2 < 0.0 || shape3 < 0.0 || scale <= 0.0) return R_NaN; if (x < 0.0 || x > scale) return ACT_D__0; /* limiting cases for (shape1 * shape3, shape2), leading to point masses */ double psh = shape1 * shape3; if (psh == 0.0 || shape2 == 0.0 || !R_FINITE(psh) || !R_FINITE(shape2)) { /* shape1 or shape3 = 0, shape2 = 0: point mass 1/2 at endpoints */ if (psh == 0.0 && shape2 == 0.0) return (x == 0 || x == scale) ? R_PosInf : ACT_D__0; /* shape1 or shape3 = 0, shape2 != 0: point mass 1 at 0 */ if (psh == 0.0 || psh/shape2 == 0.0) return (x == 0.0) ? R_PosInf : ACT_D__0; /* shape2 = 0, shape1 and shape3 != 0: point mass 1 at scale */ if (shape2 == 0.0 || shape2/psh == 0.0) return (x == scale) ? R_PosInf : ACT_D__0; /* remaining cases: shape1 or shape3 = Inf, shape2 = Inf */ if (R_FINITE(shape3)) /* shape3 < Inf: point mass 1 at midpoint */ return (x == scale/2.0) ? R_PosInf : ACT_D__0; else /* shape3 = Inf: point mass at scale */ return (x == scale) ? 
R_PosInf : ACT_D__0; } if (x == 0.0) { if (psh > 1) return(ACT_D__0); if (psh < 1) return(R_PosInf); /* psh == 1 : */ return(ACT_D_val(shape3/beta(shape1, shape2))); } if (x == scale) { if (shape2 > 1) return(ACT_D__0); if (shape2 < 1) return(R_PosInf); /* shape2 == 1 : */ return(ACT_D_val(shape1 * shape3)); } double logu, log1mu; logu = shape3 * (log(x) - log(scale)); log1mu = log1p(-exp(logu)); return ACT_D_exp(log(shape3) + shape1 * logu + (shape2 - 1.0) * log1mu - log(x) - lbeta(shape1, shape2)); } double pgenbeta(double q, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return q + shape1 + shape2 + shape3 + scale; #endif if (shape1 < 0.0 || shape2 < 0.0 || shape3 < 0.0 || scale <= 0.0) return R_NaN; if (q <= 0) return ACT_DT_0; if (q >= scale) return ACT_DT_1; double u = exp(shape3 * (log(q) - log(scale))); return pbeta(u, shape1, shape2, lower_tail, log_p); } double qgenbeta(double p, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return p + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; ACT_Q_P01_boundaries(p, 0, scale); p = ACT_D_qIv(p); return scale * R_pow(qbeta(p, shape1, shape2, lower_tail, 0), 1.0/shape3); } double rgenbeta(double shape1, double shape2, double shape3, double scale) { if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; return scale * R_pow(rbeta(shape1, shape2), 1.0/shape3); } double mgenbeta(double order, double shape1, double shape2, double shape3, double scale, int give_log) { #ifdef IEEE_754 if 
(ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale)) return order + shape1 + shape2 + shape3 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= - shape1 * shape3) return R_PosInf; double tmp = order / shape3; return R_pow(scale, order) * beta(shape1 + tmp, shape2) / beta(shape1, shape2); } double levgenbeta(double limit, double shape1, double shape2, double shape3, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(shape3) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + shape3 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(shape3) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || shape3 <= 0.0 || scale <= 0.0) return R_NaN; if (order <= - shape1 * shape3) return R_PosInf; if (limit <= 0.0) return 0.0; double u, tmp; tmp = order / shape3; u = exp(shape3 * (log(limit) - log(scale))); return R_pow(scale, order) * beta(shape1 + tmp, shape2) / beta(shape1, shape2) * pbeta(u, shape1 + tmp, shape2, 1, 0) + ACT_DLIM__0(limit, order) * pbeta(u, shape1, shape2, 0, 0); } actuar/src/locale.h0000644000176200001440000000023715147745722013750 0ustar liggesusers/* Localization */ #include #ifdef ENABLE_NLS #include #define _(String) dgettext ("actuar", String) #else #define _(String) (String) #endif actuar/src/invtrgamma.c0000644000176200001440000001012715147745722014650 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute density, cumulative distribution and quantile * functions, raw and limited moments and to simulate random variates * for the inverse transformed gamma distribution. See * ../R/InverseTransformedGamma.R for details. 
* * We work with the density expressed as * * shape2 * u^shape1 * e^(-u) / (x * gamma(shape1)) * * with u = (scale/x)^shape2. * * AUTHORS: Mathieu Pigeon and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dinvtrgamma(double x, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return x + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale < 0.0) return R_NaN; /* handle also x == 0 here */ if (!R_FINITE(x) || x <= 0.0) return ACT_D__0; double logu = shape2 * (log(scale) - log(x)); return ACT_D_exp(log(shape2) + shape1 * logu - exp(logu) - log(x) - lgammafn(shape1)); } double pinvtrgamma(double q, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(q) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return q + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale < 0.0) return R_NaN;; if (q <= 0) return ACT_DT_0; double u = exp(shape2 * (log(scale) - log(q))); return pgamma(u, shape1, 1.0, !lower_tail, log_p); } double qinvtrgamma(double p, double shape1, double shape2, double scale, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return p + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN;; ACT_Q_P01_boundaries(p, 0, R_PosInf); p = ACT_D_qIv(p); return scale * R_pow(qgamma(p, shape1, 1.0, !lower_tail, 0), -1.0/shape2); } double rinvtrgamma(double shape1, double shape2, double scale) { if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN;; return scale * 
R_pow(rgamma(shape1, 1.0), -1.0/shape2); } double minvtrgamma(double order, double shape1, double shape2, double scale, int give_log) { #ifdef IEEE_754 if (ISNAN(order) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale)) return order + shape1 + shape2 + scale; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (order >= shape1 * shape2) return R_PosInf; return R_pow(scale, order) * gammafn(shape1 - order / shape2) / gammafn(shape1); } double levinvtrgamma(double limit, double shape1, double shape2, double scale, double order, int give_log) { #ifdef IEEE_754 if (ISNAN(limit) || ISNAN(shape1) || ISNAN(shape2) || ISNAN(scale) || ISNAN(order)) return limit + shape1 + shape2 + scale + order; #endif if (!R_FINITE(shape1) || !R_FINITE(shape2) || !R_FINITE(scale) || !R_FINITE(order) || shape1 <= 0.0 || shape2 <= 0.0 || scale <= 0.0) return R_NaN; if (limit <= 0.0) return 0.0; double u = exp(shape2 * (log(scale) - log(limit))); return R_pow(scale, order) * actuar_gamma_inc(shape1 - order/shape2, u) / gammafn(shape1) + ACT_DLIM__0(limit, order) * pgamma(u, shape1, 1.0, 1, 0); } actuar/src/random.c0000644000176200001440000003757215147745722014000 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to generate variates of some probability laws not in * base R. Function .External() calls actuar_do_random() with * arguments: * * 1. the name of the distribution from which to simulate, with * an "r" prepended to it (e.g. "rpareto"); * 2. the number of variates; * 3:x. the parameters of the distribution. * * Function actuar_do_random() will extract the name of the * distribution, look up in table random_tab defined in names.c which of * actuar_do_random{1,2,3,4} should take care of the simulation and * dispatch to this function. 
In turn, functions * actuar_do_random{1,2,3,4} call function rdist() to get actual * variates from distribution "dist". * * This scheme is essentially what is used in base R (see files * src/main/random.c, src/main/names.c) with add-ons taken from * src/library/stats/src/random.c to support return values that can * be either real or integer. * * To add a new distribution: write an rdist() function, add an entry * in names.c and in the definition of the corresponding * actuar_do_random{1,2,3,4} function, declare the function in * actuar.h. * * AUTHOR: Vincent Goulet * with much indirect help from the R Core Team */ #include #include #include "actuar.h" #include "locale.h" /* Prototypes of auxiliary functions */ static Rboolean random1(double (*f)(double), double *, int, SEXP, int, SEXPTYPE); static Rboolean random2(double (*f)(double, double), double *, int, double *, int, SEXP, int, SEXPTYPE); static Rboolean random3(double (*f)(double, double, double), double *, int, double *, int, double *, int, SEXP, int, SEXPTYPE); static Rboolean random4(double (*f)(double, double, double, double), double *, int, double *, int, double *, int, double *, int, SEXP, int, SEXPTYPE); static Rboolean random5(double (*f)(double, double, double, double, double), double *, int, double *, int, double *, int, double *, int, double *, int, SEXP, int, SEXPTYPE); /* Additional access macros */ #define CAD5R(e) CAR(CDR(CDR(CDR(CDR(CDR(e)))))) /* Utility function used in actuar_do_random{1,2,3,4}. 
*/ static void fill_with_NAs(SEXP x, int n, SEXPTYPE type) { int i; if (type == INTSXP) { for (i = 0; i < n; i++) { INTEGER(x)[i] = NA_INTEGER; } } else { /* REALSXP */ for (i = 0; i < n; i++) { REAL(x)[i] = NA_REAL; } } warning(_("NAs produced")); } /* Functions for one parameter distributions */ static Rboolean random1(double (*f)(double), double *a, int na, SEXP x, int n, SEXPTYPE type) { int i; Rboolean naflag = FALSE; if (type == INTSXP) { double rx; int *ix = INTEGER(x); for (i = 0; i < n; i++) { rx = f(a[i % na]); if (ISNAN(rx) || rx > INT_MAX || rx <= INT_MIN) { naflag = TRUE; ix[i] = NA_INTEGER; } else ix[i] = (int) rx; } } else /* REALSXP */ { double *rx = REAL(x); for (i = 0; i < n; i++) { rx[i] = f(a[i % na]); if (ISNAN(rx[i])) naflag = TRUE; } } return(naflag); } #define RAND1(num, fun) \ case num: \ naflag = random1(fun, REAL(a), na, x, n, type); \ break SEXP actuar_do_random1(int code, SEXP args, SEXPTYPE type) { SEXP x, a; int n, na; /* Check validity of arguments */ if (!isVector(CAR(args)) || !isNumeric(CADR(args))) error(_("invalid arguments")); /* Number of variates to generate */ if (LENGTH(CAR(args)) == 1) { n = asInteger(CAR(args)); if (n == NA_INTEGER || n < 0) error(_("invalid arguments")); } else n = LENGTH(CAR(args)); /* If n == 0, return numeric(0) */ PROTECT(x = allocVector(type, n)); if (n == 0) { UNPROTECT(1); return(x); } /* If length of parameters < 1, return NaN */ na = LENGTH(CADR(args)); if (na < 1) fill_with_NAs(x, n, type); /* Otherwise, dispatch to appropriate r* function */ else { Rboolean naflag = FALSE; PROTECT(a = coerceVector(CADR(args), REALSXP)); GetRNGstate(); switch (code) { RAND1(1, rinvexp); RAND1(101, rlogarithmic); RAND1(102, rztpois); RAND1(103, rztgeom); default: error(_("internal error in actuar_do_random1")); } if (naflag) warning(R_MSG_NA); PutRNGstate(); UNPROTECT(1); } UNPROTECT(1); return x; } /* Functions for two parameter distributions */ static Rboolean random2(double (*f)(double, double), double *a, 
int na, double *b, int nb, SEXP x, int n, SEXPTYPE type) { int i; Rboolean naflag = FALSE; if (type == INTSXP) { double rx; int *ix = INTEGER(x); for (i = 0; i < n; i++) { rx = f(a[i % na], b[i % nb]); if (ISNAN(rx) || rx > INT_MAX || rx <= INT_MIN) { naflag = TRUE; ix[i] = NA_INTEGER; } else ix[i] = (int) rx; } } else /* REALSXP */ { double *rx = REAL(x); for (i = 0; i < n; i++) { rx[i] = f(a[i % na], b[i % nb]); if (ISNAN(rx[i])) naflag = TRUE; } } return(naflag); } #define RAND2(num, fun) \ case num: \ naflag = random2(fun, REAL(a), na, REAL(b), nb, x, n, type); \ break SEXP actuar_do_random2(int code, SEXP args, SEXPTYPE type) { SEXP x, a, b; int n, na, nb; /* Check validity of arguments */ if (!isVector(CAR(args)) || !isNumeric(CADR(args)) || !isNumeric(CADDR(args))) error(_("invalid arguments")); /* Number of variates to generate */ if (LENGTH(CAR(args)) == 1) { n = asInteger(CAR(args)); if (n == NA_INTEGER || n < 0) error(_("invalid arguments")); } else n = LENGTH(CAR(args)); /* If n == 0, return numeric(0) */ PROTECT(x = allocVector(type, n)); if (n == 0) { UNPROTECT(1); return(x); } /* If length of parameters < 1, return NA */ na = LENGTH(CADR(args)); nb = LENGTH(CADDR(args)); if (na < 1 || nb < 1) fill_with_NAs(x, n, type); /* Otherwise, dispatch to appropriate r* function */ else { Rboolean naflag = FALSE; PROTECT(a = coerceVector(CADR(args), REALSXP)); PROTECT(b = coerceVector(CADDR(args), REALSXP)); GetRNGstate(); switch (code) { RAND2( 1, rinvgamma); RAND2( 2, rinvparalogis); RAND2( 3, rinvpareto); RAND2( 4, rinvweibull); RAND2( 5, rlgamma); RAND2( 6, rllogis); RAND2( 7, rparalogis); RAND2( 8, rpareto); RAND2( 9, rpareto1); RAND2( 10, rgumbel); RAND2( 11, rinvgauss); RAND2(101, rztnbinom); RAND2(102, rztbinom); RAND2(103, rzmlogarithmic); RAND2(104, rzmpois); RAND2(105, rzmgeom); RAND2(106, rpoisinvgauss); default: error(_("internal error in actuar_do_random2")); } if (naflag) warning(R_MSG_NA); PutRNGstate(); UNPROTECT(2); } UNPROTECT(1); return x; } 
/* Functions for three parameter distributions */ static Rboolean random3(double (*f)(double, double, double), double *a, int na, double *b, int nb, double *c, int nc, SEXP x, int n, SEXPTYPE type) { int i; Rboolean naflag = FALSE; if (type == INTSXP) { double rx; int *ix = INTEGER(x); for (i = 0; i < n; i++) { rx = f(a[i % na], b[i % nb], c[i % nc]); if (ISNAN(rx) || rx > INT_MAX || rx <= INT_MIN) { naflag = TRUE; ix[i] = NA_INTEGER; } else ix[i] = (int) rx; } } else /* REALSXP */ { double *rx = REAL(x); for (i = 0; i < n; i++) { rx[i] = f(a[i % na], b[i % nb], c[i % nc]); if (ISNAN(rx[i])) naflag = TRUE; } } return(naflag); } #define RAND3(num, fun) \ case num: \ naflag = random3(fun, REAL(a), na, REAL(b), nb, REAL(c), nc, x, n, type); \ break SEXP actuar_do_random3(int code, SEXP args, SEXPTYPE type) { SEXP x, a, b, c; int n, na, nb, nc; /* Check validity of arguments */ if (!isVector(CAR(args)) || !isNumeric(CADR(args)) || !isNumeric(CADDR(args)) || !isNumeric(CADDDR(args))) error(_("invalid arguments")); /* Number of variates to generate */ if (LENGTH(CAR(args)) == 1) { n = asInteger(CAR(args)); if (n == NA_INTEGER || n < 0) error(_("invalid arguments")); } else n = LENGTH(CAR(args)); /* If n == 0, return numeric(0) */ PROTECT(x = allocVector(type, n)); if (n == 0) { UNPROTECT(1); return(x); } /* If length of parameters < 1, return NaN */ na = LENGTH(CADR(args)); nb = LENGTH(CADDR(args)); nc = LENGTH(CADDDR(args)); if (na < 1 || nb < 1 || nc < 1) fill_with_NAs(x, n, type); /* Otherwise, dispatch to appropriate r* function */ else { Rboolean naflag = FALSE; PROTECT(a = coerceVector(CADR(args), REALSXP)); PROTECT(b = coerceVector(CADDR(args), REALSXP)); PROTECT(c = coerceVector(CADDDR(args), REALSXP)); GetRNGstate(); switch (code) { RAND3( 1, rburr); RAND3( 2, rgenpareto); RAND3( 3, rinvburr); RAND3( 4, rinvtrgamma); RAND3( 5, rtrgamma); RAND3( 6, rpareto2); RAND3( 7, rpareto3); RAND3(101, rzmnbinom); RAND3(102, rzmbinom); default: error(_("internal error in 
actuar_do_random3")); } if (naflag) warning(R_MSG_NA); PutRNGstate(); UNPROTECT(3); } UNPROTECT(1); return x; } /* Functions for four parameter distributions */ static Rboolean random4(double (*f)(double, double, double, double), double *a, int na, double *b, int nb, double *c, int nc, double *d, int nd, SEXP x, int n, SEXPTYPE type) { int i; Rboolean naflag = FALSE; if (type == INTSXP) { double rx; int *ix = INTEGER(x); for (i = 0; i < n; i++) { rx = f(a[i % na], b[i % nb], c[i % nc], d[i % nd]); if (ISNAN(rx) || rx > INT_MAX || rx <= INT_MIN) { naflag = TRUE; ix[i] = NA_INTEGER; } else ix[i] = (int) rx; } } else /* REALSXP */ { double *rx = REAL(x); for (i = 0; i < n; i++) { rx[i] = f(a[i % na], b[i % nb], c[i % nc], d[i % nd]); if (ISNAN(rx[i])) naflag = TRUE; } } return(naflag); } #define RAND4(num, fun) \ case num: \ naflag = random4(fun, REAL(a), na, REAL(b), nb, REAL(c), nc, REAL(d), nd, x, n, type); \ break SEXP actuar_do_random4(int code, SEXP args, SEXPTYPE type) { SEXP x, a, b, c, d; int n, na, nb, nc, nd; /* Check validity of arguments */ if (!isVector(CAR(args)) || !isNumeric(CADR(args)) || !isNumeric(CADDR(args)) || !isNumeric(CADDDR(args)) || !isNumeric(CAD4R(args))) error(_("invalid arguments")); /* Number of variates to generate */ if (LENGTH(CAR(args)) == 1) { n = asInteger(CAR(args)); if (n == NA_INTEGER || n < 0) error(_("invalid arguments")); } else n = LENGTH(CAR(args)); /* If n == 0, return numeric(0) */ PROTECT(x = allocVector(type, n)); if (n == 0) { UNPROTECT(1); return(x); } /* If length of parameters < 1, return NaN */ na = LENGTH(CADR(args)); nb = LENGTH(CADDR(args)); nc = LENGTH(CADDDR(args)); nd = LENGTH(CAD4R(args)); if (na < 1 || nb < 1 || nc < 1 || nd < 1) fill_with_NAs(x, n, type); /* Otherwise, dispatch to appropriate r* function */ else { Rboolean naflag = FALSE; PROTECT(a = coerceVector(CADR(args), REALSXP)); PROTECT(b = coerceVector(CADDR(args), REALSXP)); PROTECT(c = coerceVector(CADDDR(args), REALSXP)); PROTECT(d = 
coerceVector(CAD4R(args), REALSXP)); GetRNGstate(); switch (code) { RAND4(1, rtrbeta); RAND4(2, rgenbeta); RAND4(3, rpareto4); default: error(_("internal error in actuar_do_random4")); } if (naflag) warning(R_MSG_NA); PutRNGstate(); UNPROTECT(4); } UNPROTECT(1); return x; } /* Functions for Five parameter distributions */ static Rboolean random5(double (*f)(double, double, double, double, double), double *a, int na, double *b, int nb, double *c, int nc, double *d, int nd, double *e, int ne, SEXP x, int n, SEXPTYPE type) { int i; Rboolean naflag = FALSE; if (type == INTSXP) { double rx; int *ix = INTEGER(x); for (i = 0; i < n; i++) { rx = f(a[i % na], b[i % nb], c[i % nc], d[i % nd], e[i % ne]); if (ISNAN(rx) || rx > INT_MAX || rx <= INT_MIN) { naflag = TRUE; ix[i] = NA_INTEGER; } else ix[i] = (int) rx; } } else /* REALSXP */ { double *rx = REAL(x); for (i = 0; i < n; i++) { rx[i] = f(a[i % na], b[i % nb], c[i % nc], d[i % nd], e[i % nd]); if (ISNAN(rx[i])) naflag = TRUE; } } return(naflag); } #define RAND5(num, fun) \ case num: \ naflag = random5(fun, REAL(a), na, REAL(b), nb, REAL(c), nc, REAL(d), nd, REAL(e), ne, x, n, type); \ break SEXP actuar_do_random5(int code, SEXP args, SEXPTYPE type) { SEXP x, a, b, c, d, e; int n, na, nb, nc, nd, ne; /* Check validity of arguments */ if (!isVector(CAR(args)) || !isNumeric(CADR(args)) || !isNumeric(CADDR(args)) || !isNumeric(CADDDR(args)) || !isNumeric(CAD4R(args)) || !isNumeric(CAD5R(args))) error(_("invalid arguments")); /* Number of variates to generate */ if (LENGTH(CAR(args)) == 1) { n = asInteger(CAR(args)); if (n == NA_INTEGER || n < 0) error(_("invalid arguments")); } else n = LENGTH(CAR(args)); /* If n == 0, return numeric(0) */ PROTECT(x = allocVector(type, n)); if (n == 0) { UNPROTECT(1); return(x); } /* If length of parameters < 1, return NaN */ na = LENGTH(CADR(args)); nb = LENGTH(CADDR(args)); nc = LENGTH(CADDDR(args)); nd = LENGTH(CAD4R(args)); ne = LENGTH(CAD5R(args)); if (na < 1 || nb < 1 || nc < 1 || nd 
< 1 || ne < 1) fill_with_NAs(x, n, type); /* Otherwise, dispatch to appropriate r* function */ else { Rboolean naflag = FALSE; PROTECT(a = coerceVector(CADR(args), REALSXP)); PROTECT(b = coerceVector(CADDR(args), REALSXP)); PROTECT(c = coerceVector(CADDDR(args), REALSXP)); PROTECT(d = coerceVector(CAD4R(args), REALSXP)); PROTECT(e = coerceVector(CAD5R(args), REALSXP)); GetRNGstate(); switch (code) { RAND5(1, rfpareto); default: error(_("internal error in actuar_do_random5")); } if (naflag) warning(R_MSG_NA); PutRNGstate(); UNPROTECT(5); } UNPROTECT(1); return x; } /* Main function, the only one used by .External(). */ SEXP actuar_do_random(SEXP args) { int i; const char *name; /* Extract distribution name */ args = CDR(args); name = CHAR(STRING_ELT(CAR(args), 0)); /* Dispatch to actuar_do_random{1,2,3,4} */ for (i = 0; random_tab[i].name; i++) { if (!strcmp(random_tab[i].name, name)) return random_tab[i].cfun(random_tab[i].code, CDR(args), random_tab[i].type); } /* No dispatch is an error */ error(_("internal error in actuar_do_random")); return args; /* never used; to keep -Wall happy */ } actuar/src/dpq.h0000644000176200001440000000743715151206331013265 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Utilities for `dpq' handling (density/probability/quantile) * * These (except ACT_DLIM__0) are copied from src/nmath/dpq.h of R * sources with the names changed from "R_" to "ACT_". * * AUTHOR: Vincent Goulet * with much indirect help from the R Core Team */ /* give_log in "d" & "mgf"; log_p in "p" & "q" : */ #define give_log log_p #define ACT_D__0 (log_p ? R_NegInf : 0.) #define ACT_D__1 (log_p ? 0. : 1.) #define ACT_DT_0 (lower_tail ? ACT_D__0 : ACT_D__1) #define ACT_DT_1 (lower_tail ? ACT_D__1 : ACT_D__0) /* limiting cases in qzm */ #define ACT_Q_p0lim(p0) ((p0 == 0) ? 1.0 : 0.0) /* Use 0.5 - p + 0.5 to perhaps gain 1 bit of accuracy */ #define ACT_D_Lval(p) (lower_tail ? 
(p) : (0.5 - (p) + 0.5)) /* p */ #define ACT_D_Cval(p) (lower_tail ? (0.5 - (p) + 0.5) : (p)) /* 1 - p */ #define ACT_D_val(x) (log_p ? log(x) : (x)) /* x in pF(x,..) */ #define ACT_D_qIv(p) (log_p ? exp(p) : (p)) /* p in qF(p,..) */ #define ACT_DT_qIv(p) (log_p ? (lower_tail ? exp(p) : - expm1(p)) \ : ACT_D_Lval(p)) /* 1 - p in qF(p,..) */ #define ACT_DT_1mqIv(p) (log_p ? (lower_tail ? - expm1(p) : exp(p)) \ : ACT_D_Cval(p)) /* 1 - p in qF(p,..) */ #define ACT_D_exp(x) (log_p ? (x) : exp(x)) /* exp(x) */ #define ACT_D_Cexp(x) (log_p ? log(-expm1(x)) : (-expm1(x))) /* [log](1-exp(x)) */ #define ACT_D_Clog(p) (log_p ? log1p(-(p)) : (0.5 - (p) + 0.5)) /* [log](1-p) */ #define ACT_DT_val(x) (lower_tail ? ACT_D_val(x) : ACT_D_Clog(x)) #define ACT_DT_Eval(x) (lower_tail ? ACT_D_exp(x) : ACT_D_Cexp(x)) #define ACT_DT_Cval(x) (lower_tail ? ACT_D_Clog(x) : ACT_D_val(x)) #define ACT_DT_CEval(x) (lower_tail ? ACT_D_Cexp(x) : ACT_D_exp(x)) /* Check value of p for discrete q(p, ...) */ #define ACT_Q_P01_check(p) \ if ((log_p && p > 0) || \ (!log_p && (p < 0 || p > 1)) ) \ return R_NaN /* Check boundaries exactly for q(p, ...) functions. * * ACT_Q_P01_boundaries(p, _LEFT_, _RIGHT_) <==> * * ACT_Q_P01_check(p); * if (p == ACT_DT_0) return _LEFT_ ; * if (p == ACT_DT_1) return _RIGHT_; * * just more efficient(fewer tests). Used only for continuous * distributions; for discrete distributions we need additionnal * checks immediately after R_Q_P01_check. */ #define ACT_Q_P01_boundaries(p, _LEFT_, _RIGHT_) \ if (log_p) { \ if(p > 0) \ return R_NaN; \ if(p == 0) /* upper bound*/ \ return lower_tail ? _RIGHT_ : _LEFT_; \ if(p == R_NegInf) \ return lower_tail ? _LEFT_ : _RIGHT_; \ } \ else { /* !log_p */ \ if(p < 0 || p > 1) \ return R_NaN; \ if(p == 0) \ return lower_tail ? _LEFT_ : _RIGHT_; \ if(p == 1) \ return lower_tail ? _RIGHT_ : _LEFT_; \ } /* Infinite limit in lev 1e-7*fmax2(1., fabs(x))) /* Check for non-integer x in discrete d(x, ...) 
*/ #define ACT_D_nonint_check(x) \ if (ACT_nonint(x)) { \ warning(_("non-integer x = %f"), x); \ return ACT_D__0; \ } actuar/src/zmbinom.c0000644000176200001440000001216315151206331014137 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-modified binomial distribution. See * ../R/ZeroModifiedBinomial.R for details. * * Let X ~ Binomial(size, prob). The probability mass function of the * zero-modified Binomial random variable Z is * * Pr[Z = 0] = p0m * Pr[Z = x] = (1 - p0m) * Pr[X = x]/(1 - (1 - prob)^size), x = 1, 2, ... * * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = 1 - (1 - p0m) * (1 - Pr[X <= x])/(1 - (1 - prob)^size). * * Limiting cases: * * 1. size == 0 has mass (1 - p0m) at x = 1; * 2. prob == 0 has mass (1 - p0m) at x = 1. * * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" #include "actuar.h" double dzmbinom(double x, double size, double prob, double p0m, int give_log) { /* We compute Pr[X = 0] with dbinom_raw() [as would eventually * dbinom()] to take advantage of all the optimizations for * small/large values of 'prob' and 'size' (and also to skip some * validity tests). */ #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob) || ISNAN(p0m)) return x + size + prob + p0m; #endif if (prob < 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0 || !R_FINITE(x)) return ACT_D__0; if (x == 0) return ACT_D_val(p0m); /* NOTE: from now on x > 0 */ /* limiting cases as size -> 1 or prob -> 0 are mass (1-p0m) at one */ if (size == 1 || prob == 0) return (x == 1) ? 
ACT_D_Clog(p0m) : ACT_D__0; double lp0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/1); return ACT_D_val((1 - p0m) * dbinom(x, size, prob, /*give_log*/0)/(-expm1(lp0))); } double pzmbinom(double x, double size, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob) || ISNAN(p0m)) return x + size + prob + p0m; #endif if (prob < 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; if (x < 0) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; if (x < 1) return ACT_DT_val(p0m); /* NOTE: from now on x >= 1 */ /* limiting cases as size -> 1 or prob -> 0 are mass (1-p0m) at one */ if (size == 1 || prob == 0) return ACT_DT_1; double lp0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/1); /* working in log scale improves accuracy */ return ACT_DT_CEval(log1p(-p0m) + pbinom(x, size, prob, /*l._t.*/0, /*log_p*/1) - log1mexp(-lp0)); } double qzmbinom(double p, double size, double prob, double p0m, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(size) || ISNAN(prob) || ISNAN(p0m)) return p + size + prob + p0m; #endif if (prob < 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; ACT_Q_P01_check(p); if (p0m == 1) return 0.0; /* limiting cases as size -> 1 or prob -> 0 are mass (1-p0m) at one */ if (size == 1 || prob == 0) return (ACT_DT_qIv(p) <= p0m) ? ACT_Q_p0lim(p0m) : 1.0; if (p == ACT_DT_0) return ACT_Q_p0lim(p0m); if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); /* at this point 0 < p < 1, so p0m = 0 is not an issue */ /* working in log scale improves accuracy */ double lp0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/1); return qbinom(-expm1(log1mexp(-lp0) - log1p(-p0m) + log1p(-p)), size, prob, /*l._t.*/1, /*log_p*/0); } /* ALGORITHM FOR GENERATION OF RANDOM VARIATES * * 1. p0m >= p0: just simulate variates from the discrete mixture. * * 2. p0m < p0: fastest method depends on the difference p0 - p0m. 
* * 2.1 p0 - p0m < ACT_DIFFMAX_REJECTION: rejection method with an * envelope that differs from the target distribution at zero * only. In other words: rejection only at zero. * 2.2 p0 - p0m >= ACT_DIFFMAX_REJECTION: simulate variates from * discrete mixture with the corresponding zero truncated * distribution. * * The threshold ACT_DIFFMAX_REJECTION is distribution specific. */ #define ACT_DIFFMAX_REJECTION 0.9 double rzmbinom(double size, double prob, double p0m) { if (!R_FINITE(prob) || prob < 0 || prob > 1 || size < 0 || p0m < 0 || p0m > 1) return R_NaN; /* limiting cases as size -> 1 or prob -> 0 are mass (1-p0m) at one */ if (size == 1 || prob == 0) return (unif_rand() <= p0m) ? 0.0 : 1.0; double x, p0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/0); /* p0m >= p0: generate from mixture */ if (p0m >= p0) return (unif_rand() * (1 - p0) < (1 - p0m)) ? rbinom(size, prob) : 0.0; /* p0m < p0: choice of algorithm depends on difference p0 - p0m */ if (p0 - p0m < ACT_DIFFMAX_REJECTION) { /* rejection method */ for (;;) { x = rbinom(size, prob); if (x != 0 || /* x == 0 and */ runif(0, p0 * (1 - p0m)) <= (1 - p0) * p0m) return x; } } else { /* generate from zero truncated mixture */ return (unif_rand() <= p0m) ? 0.0 : qbinom(runif(p0, 1), size, prob, /*l._t.*/1, /*log_p*/0); } } actuar/src/ztbinom.c0000644000176200001440000000637115151206331014152 0ustar liggesusers/* actuar: Actuarial Functions and Heavy Tailed Distributions * * Functions to compute probability function, cumulative distribution * and quantile functions, and to simulate random variates for the * zero-truncated binomial distribution. See * ../R/ZeroTruncatedBinomial.R for details. * * Let X ~ Binomial(size, prob). The probability mass function of the * zero-truncated Binomial random variable Z is * * Pr[Z = 0] = 0 * Pr[Z = x] = Pr[X = x]/(1 - (1 - prob)^size), x = 1, 2, ... 
* * The distribution function is, for all x = 0, 1, 2, ..., * * Pr[Z <= x] = (Pr[X <= x] - (1 - prob)^size)/(1 - (1 - prob)^size) * * Limiting cases: * * 1. size == 1 is point mass at x = 1; * 2. prob == 0 is point mass at x = 1. * * AUTHOR: Jérémy Déraspe and Vincent Goulet */ #include #include #include "locale.h" #include "dpq.h" double dztbinom(double x, double size, double prob, int give_log) { /* We compute Pr[X = 0] with dbinom_raw() [as would eventually * dbinom()] to take advantage of all the optimizations for * small/large values of 'prob' and 'size' (and also to skip some * validity tests). */ #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob)) return x + size + prob; #endif if (prob < 0 || prob > 1 || size < 1) return R_NaN; if (x < 1 || !R_FINITE(x)) return ACT_D__0; /* limiting cases as size -> 1 or prob -> 0 are point mass at one */ if (size == 1 || prob == 0) return (x == 1) ? ACT_D__1 : ACT_D__0; double lp0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/1); return ACT_D_val(dbinom(x, size, prob, /*give_log*/0)/(-expm1(lp0))); } double pztbinom(double x, double size, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(x) || ISNAN(size) || ISNAN(prob)) return x + size + prob; #endif if (prob < 0 || prob > 1 || size < 1) return R_NaN; if (x < 1) return ACT_DT_0; if (!R_FINITE(x)) return ACT_DT_1; /* limiting cases as size -> 1 or prob -> 0 are point mass at one */ if (size == 1 || prob == 0) return (x >= 1) ? 
ACT_DT_1 : ACT_DT_0; double lp0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/1); return ACT_DT_Cval(pbinom(x, size, prob, /*l._t.*/0, /*log_p*/0)/(-expm1(lp0))); } double qztbinom(double p, double size, double prob, int lower_tail, int log_p) { #ifdef IEEE_754 if (ISNAN(p) || ISNAN(size) || ISNAN(prob)) return p + size + prob; #endif if (prob < 0 || prob > 1 || size < 1) return R_NaN; ACT_Q_P01_check(p); /* limiting cases as size -> 1 or prob -> 0 are point mass at one */ if (size == 1 || prob == 0) return 1.0; if (p == ACT_DT_0) return 1.0; if (p == ACT_DT_1) return R_PosInf; p = ACT_DT_qIv(p); double p0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/0); return qbinom(p0 + (1 - p0) * p, size, prob, /*l._t.*/1, /*log_p*/0); } double rztbinom(double size, double prob) { if (!R_FINITE(prob) || prob < 0 || prob > 1 || size < 0) return R_NaN; /* limiting cases as size -> 1 or prob -> 0 are point mass at one */ if (size == 1 || prob == 0) return 1.0; double p0 = dbinom_raw(0, size, prob, 1 - prob, /*give_log*/0); return qbinom(runif(p0, 1), size, prob, /*l._t.*/1, /*log_p*/0); } actuar/NAMESPACE0000644000176200001440000001132515147745722012770 0ustar liggesusers### C code useDynLib(actuar, .registration = TRUE, .fixes = "C_") ### Imports import(expint) # for C code import(stats, graphics) importFrom(utils, head, tail) ### Exports export( ## Credibility theory cm, ## Simulation of insurance data rcompound, rcomppois, rmixture, rcomphierarc, simul, severity, unroll, ## Risk theory aggregateDist, CTE, TVaR, discretize, discretise, VaR, adjCoef, ruin, ## One parameter distributions dinvexp, pinvexp, qinvexp, rinvexp, minvexp, levinvexp, mexp, levexp, mgfexp, dlogarithmic, plogarithmic, qlogarithmic, rlogarithmic, dztpois, pztpois, qztpois, rztpois, dztgeom, pztgeom, qztgeom, rztgeom, ## Two parameter distributions munif, levunif, mgfunif, mnorm, mgfnorm, mbeta, levbeta, mgamma, levgamma, mgfgamma, mchisq, levchisq, mgfchisq, dinvgamma, pinvgamma, qinvgamma, 
rinvgamma, minvgamma, levinvgamma, mgfinvgamma, dinvparalogis, pinvparalogis, qinvparalogis, rinvparalogis, minvparalogis, levinvparalogis, dinvpareto, pinvpareto, qinvpareto, rinvpareto, minvpareto, levinvpareto, dinvweibull, pinvweibull, qinvweibull, rinvweibull, minvweibull, levinvweibull, dlgompertz, plgompertz, qlgompertz, rlgompertz, mlgompertz, levlgompertz, # aliases dlgamma, plgamma, qlgamma, rlgamma, mlgamma, levlgamma, dllogis, pllogis, qllogis, rllogis, mllogis, levllogis, mlnorm, levlnorm, dparalogis, pparalogis, qparalogis, rparalogis, mparalogis, levparalogis, dpareto, ppareto, qpareto, rpareto, mpareto, levpareto, dpareto1, ppareto1, qpareto1, rpareto1, mpareto1, levpareto1, mweibull, levweibull, ## minvGauss, levinvGauss, mgfinvGauss, [defunct v3.0-0] dgumbel, pgumbel, qgumbel, rgumbel, mgumbel, mgfgumbel, dinvgauss, pinvgauss, qinvgauss, rinvgauss, minvgauss, levinvgauss, mgfinvgauss, dztnbinom, pztnbinom, qztnbinom, rztnbinom, dztbinom, pztbinom, qztbinom, rztbinom, dzmlogarithmic, pzmlogarithmic, qzmlogarithmic, rzmlogarithmic, dzmpois, pzmpois, qzmpois, rzmpois, dzmgeom, pzmgeom, qzmgeom, rzmgeom, dpoisinvgauss, ppoisinvgauss, qpoisinvgauss, rpoisinvgauss, dpig, ppig, qpig, rpig, # aliases ## Three parameter distributions dburr, pburr, qburr, rburr, mburr, levburr, dgenpareto, pgenpareto, qgenpareto, rgenpareto, mgenpareto, levgenpareto, dinvburr, pinvburr, qinvburr, rinvburr, minvburr, levinvburr, dinvtrgamma, pinvtrgamma, qinvtrgamma, rinvtrgamma, minvtrgamma, levinvtrgamma, dtrgamma, ptrgamma, qtrgamma, rtrgamma, mtrgamma, levtrgamma, dpareto2, ppareto2, qpareto2, rpareto2, mpareto2, levpareto2, dpareto3, ppareto3, qpareto3, rpareto3, mpareto3, levpareto3, dzmnbinom, pzmnbinom, qzmnbinom, rzmnbinom, dzmbinom, pzmbinom, qzmbinom, rzmbinom, ## Four parameter distributions dgenbeta, pgenbeta, qgenbeta, rgenbeta, mgenbeta, levgenbeta, dtrbeta, ptrbeta, qtrbeta, rtrbeta, mtrbeta, levtrbeta, dpearson6, ppearson6, qpearson6, rpearson6, mpearson6, 
levpearson6, #aliases dpareto4, ppareto4, qpareto4, rpareto4, mpareto4, levpareto4, ## Five parameter distributions dfpareto, pfpareto, qfpareto, rfpareto, mfpareto, levfpareto, ## Phase-type distributions dphtype, pphtype, rphtype, mphtype, mgfphtype, ## Loss distributions grouped.data, ogive, emm, mde, elev, coverage, var, sd ) ### Methods S3method("[", grouped.data) S3method("[<-", grouped.data) S3method(aggregate, portfolio) S3method(CTE, aggregateDist) S3method(diff, aggregateDist) S3method(elev, default) S3method(elev, grouped.data) S3method(emm, default) S3method(emm, grouped.data) S3method(frequency, portfolio) S3method(hist, grouped.data) S3method(knots, ogive) S3method(knots, elev) S3method(mean, aggregateDist) S3method(mean, grouped.data) S3method(sd, default) S3method(sd, grouped.data) S3method(var, default) S3method(var, grouped.data) S3method(ogive, default) S3method(ogive, grouped.data) S3method(plot, adjCoef) S3method(plot, aggregateDist) S3method(plot, elev) S3method(plot, ogive) S3method(plot, ruin) S3method(predict, bstraub) S3method(predict, cm) S3method(predict, hache) S3method(predict, hierarc) S3method(predict, bayes) S3method(print, aggregateDist) S3method(print, elev) S3method(print, cm) S3method(print, mde) S3method(print, ogive) S3method(print, summary.ogive) S3method(print, portfolio) S3method(print, summary.aggregateDist) S3method(print, summary.cm) S3method(quantile, aggregateDist) S3method(quantile, grouped.data) S3method(severity, default) S3method(severity, portfolio) S3method(summary, aggregateDist) S3method(summary, cm) S3method(summary, grouped.data) S3method(summary, elev) S3method(summary, ogive) S3method(VaR, aggregateDist) S3method(weights, portfolio) actuar/inst/0000755000176200001440000000000015151412457012513 5ustar liggesusersactuar/inst/include/0000755000176200001440000000000015151412457014136 5ustar liggesusersactuar/inst/include/actuarAPI.h0000644000176200001440000004314515147745722016140 0ustar liggesusers/* actuar: 
Actuarial Functions and Heavy Tailed Distributions * * Support for exported functions at the C level. * * This is derived from code in package zoo. * * Copyright (C) 2020 Vincent Goulet * Copyright (C) 2010 Jeffrey A. Ryan jeff.a.ryan @ gmail.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * AUTHOR: Vincent Goulet */ #include #include #include #ifdef __cplusplus extern "C" { #endif /* Special integrals */ double betaint(double x, double a, double b, int foo); double betaint_raw(double x, double a, double b, double x1m); /* One parameter distributions */ double mexp(double order, double scale, int give_log); double levexp(double limit, double scale, double order, int give_log); double mgfexp(double t, double scale, int give_log); double dinvexp(double x, double scale, int give_log); double pinvexp(double q, double scale, int lower_tail, int log_p); double qinvexp(double p, double scale, int lower_tail, int log_p); double rinvexp(double scale); double minvexp(double order, double scale, int give_log); double levinvexp(double limit, double scale, double order, int give_log); double dlogarithmic(double x, double p, int give_log); double plogarithmic(double x, double p, int lower_tail, int log_p); double qlogarithmic(double x, double p, int lower_tail, int log_p); double rlogarithmic(double p); double dztpois(double 
x, double lambda, int give_log); double pztpois(double q, double lambda, int lower_tail, int log_p); double qztpois(double p, double lambda, int lower_tail, int log_p); double rztpois(double lambda); double dztgeom(double x, double prob, int give_log); double pztgeom(double q, double prob, int lower_tail, int log_p); double qztgeom(double p, double prob, int lower_tail, int log_p); double rztgeom(double prob); /* Two parameter distributions */ double munif(double order, double min, double max, int give_log); double levunif(double limit, double min, double max, double order, int give_log); double mgfunif(double t, double min, double max, int give_log); double mnorm(double order, double mean, double sd, int give_log); double mgfnorm(double t, double mean, double sd, int give_log); double mbeta(double order, double shape1, double shape2, int give_log); double levbeta(double limit, double shape1, double shape2, double order, int give_log); double mgamma(double order, double shape, double scale, int give_log); double levgamma(double limit, double shape, double scale, double order, int give_log); double mgfgamma(double t, double shape, double scale, int give_log); double mchisq(double order, double df, double ncp, int give_log); double levchisq(double limit, double df, double ncp, double order, int give_log); double mgfchisq(double t, double df, double ncp, int give_log); double dinvgamma(double x, double scale, double shape, int give_log); double pinvgamma(double q, double scale, double shape, int lower_tail, int log_p); double qinvgamma(double p, double scale, double shape, int lower_tail, int log_p); double rinvgamma(double scale, double shape); double minvgamma(double order, double scale, double shape, int give_log); double levinvgamma(double limit, double scale, double shape, double order, int give_log); double mgfinvgamma(double t, double shape, double scale, int give_log); double dinvparalogis(double x, double shape, double scale, int give_log); double 
pinvparalogis(double q, double shape, double scale, int lower_tail, int log_p); double qinvparalogis(double p, double shape, double scale, int lower_tail, int log_p); double rinvparalogis(double shape, double scale); double minvparalogis(double order, double shape, double scale, int give_log); double levinvparalogis(double limit, double shape, double scale, double order, int give_log); double dinvpareto(double x, double shape, double scale, int give_log); double pinvpareto(double q, double shape, double scale, int lower_tail, int log_p); double qinvpareto(double p, double shape, double scale, int lower_tail, int log_p); double rinvpareto(double shape, double scale); double minvpareto(double order, double shape, double scale, int give_log); double levinvpareto(double limit, double shape, double scale, double order, int log_p); double dinvweibull(double x, double scale, double shape, int give_log); double pinvweibull(double q, double scale, double shape, int lower_tail, int log_p); double qinvweibull(double p, double scale, double shape, int lower_tail, int log_p); double rinvweibull(double scale, double shape); double minvweibull(double order, double scale, double shape, int give_log); double levinvweibull(double limit, double scale, double shape, double order, int give_log); double dlgamma(double x, double shapelog, double ratelog, int give_log); double plgamma(double q, double shapelog, double ratelog, int lower_tail, int log_p); double qlgamma(double p, double shapelog, double ratelog, int lower_tail, int log_p); double rlgamma(double ratelog, double shapelog); double mlgamma(double order, double shapelog, double ratelog, int give_log); double levlgamma(double limit, double shapelog, double ratelog, double order, int give_log); double dllogis(double x, double shape, double scale, int give_log); double pllogis(double q, double shape, double scale, int lower_tail, int log_p); double qllogis(double p, double shape, double scale, int lower_tail, int log_p); double 
rllogis(double shape, double scale); double mllogis(double order, double shape, double scale, int give_log); double levllogis(double limit, double shape, double scale, double order, int give_log); double mlnorm(double order, double logmean, double logsd, int give_log); double levlnorm(double limit, double logmean, double logsd, double order, int give_log); double dparalogis(double x, double shape, double scale, int give_log); double pparalogis(double q, double shape, double scale, int lower_tail, int log_p); double qparalogis(double p, double shape, double scale, int lower_tail, int log_p); double rparalogis(double shape, double scale); double mparalogis(double order, double shape, double scale, int give_log); double levparalogis(double limit, double shape, double scale, double order, int give_log); double dpareto(double x, double shape, double scale, int give_log); double ppareto(double q, double shape, double scale, int lower_tail, int log_p); double qpareto(double p, double shape, double scale, int lower_tail, int log_p); double rpareto(double shape, double scale); double mpareto(double order, double shape, double scale, int give_log); double levpareto(double limit, double shape, double scale, double order, int give_log); double dpareto1(double x, double shape, double scale, int give_log); double ppareto1(double q, double shape, double scale, int lower_tail, int log_p); double qpareto1(double p, double shape, double scale, int lower_tail, int log_p); double rpareto1(double shape, double scale); double mpareto1(double order, double shape, double scale, int give_log); double levpareto1(double limit, double shape, double scale, double order, int give_log); double mweibull(double order, double scale, double shape, int give_log); double levweibull(double limit, double scale, double shape, double order, int give_log); double dgumbel(double x, double alpha, double beta, int give_log); double pgumbel(double q, double alpha, double beta, int lower_tail, int log_p); 
double qgumbel(double p, double alpha, double beta, int lower_tail, int log_p); double rgumbel(double alpha, double beta); double mgumbel(double order, double alpha, double beta, int give_log); double mgfgumbel(double t, double alpha, double beta, int give_log); double dinvgauss(double x, double mu, double phi, int give_log); double pinvgauss(double q, double mu, double phi, int lower_tail, int log_p); double qinvgauss(double q, double mu, double phi, int lower_tail, int log_p, double tol, int maxit, int echo); double rinvgauss(double mu, double phi); double minvgauss(double order, double mean, double phi, int give_log); double levinvgauss(double limit, double mean, double phi, double order, int give_log); double mgfinvgauss(double t, double mean, double phi, int give_log); double dztnbinom(double x, double size, double prob, int give_log); double pztnbinom(double q, double size, double prob, int lower_tail, int log_p); double qztnbinom(double p, double size, double prob, int lower_tail, int log_p); double rztnbinom(double size, double prob); double dztbinom(double x, double size, double prob, int give_log); double pztbinom(double q, double size, double prob, int lower_tail, int log_p); double qztbinom(double p, double size, double prob, int lower_tail, int log_p); double rztbinom(double size, double prob); double dzmlogarithmic(double x, double p, double p0m, int give_log); double pzmlogarithmic(double x, double p, double p0m, int lower_tail, int log_p); double qzmlogarithmic(double x, double p, double p0m, int lower_tail, int log_p); double rzmlogarithmic(double p, double p0m); double dzmpois(double x, double lambda, double p0m, int give_log); double pzmpois(double q, double lambda, double p0m, int lower_tail, int log_p); double qzmpois(double p, double lambda, double p0m, int lower_tail, int log_p); double rzmpois(double lambda, double p0m); double dzmgeom(double x, double prob, double p0m, int give_log); double pzmgeom(double q, double prob, double p0m, int 
lower_tail, int log_p); double qzmgeom(double p, double prob, double p0m, int lower_tail, int log_p); double rzmgeom(double prob, double p0m); double dpoisinvgauss(double x, double mu, double phi, int give_log); double ppoisinvgauss(double q, double mu, double phi, int lower_tail, int log_p); double qpoisinvgauss(double p, double mu, double phi, int lower_tail, int log_p); double rpoisinvgauss(double mu, double phi); /* Three parameter distributions */ double dburr(double x, double shape1, double shape2, double scale, int give_log); double pburr(double q, double shape1, double shape2, double scale, int lower_tail, int log_p); double qburr(double p, double shape1, double shape2, double scale, int lower_tail, int log_p); double rburr(double shape1, double shape2, double scale); double mburr(double order, double shape1, double shape2, double scale, int give_log); double levburr(double limit, double shape1, double shape2, double scale, double order, int give_log); double dgenpareto(double x, double shape1, double shape2, double scale, int give_log); double pgenpareto(double q, double shape1, double shape2, double scale, int lower_tail, int log_p); double qgenpareto(double p, double shape1, double shape2, double scale, int lower_tail, int log_p); double rgenpareto(double shape1, double shape2, double scale); double mgenpareto(double order, double shape1, double shape2, double scale, int give_log); double levgenpareto(double limit, double shape1, double shape2, double scale, double order, int give_log); double dinvburr(double x, double shape1, double shape2, double scale, int give_log); double pinvburr(double q, double shape1, double shape2, double scale, int lower_tail, int log_p); double qinvburr(double p, double shape1, double shape2, double scale, int lower_tail, int log_p); double rinvburr(double shape1, double shape2, double scale); double minvburr(double order, double shape1, double shape2, double scale, int give_log); double levinvburr(double limit, double 
shape1, double shape2, double scale, double order, int give_log); double dinvtrgamma(double x, double shape1, double shape2, double scale, int give_log); double pinvtrgamma(double q, double shape1, double shape2, double scale, int lower_tail, int log_p); double qinvtrgamma(double p, double shape1, double shape2, double scale, int lower_tail, int log_p); double rinvtrgamma(double shape1, double shape2, double scale); double minvtrgamma(double order, double shape1, double shape2, double scale, int give_log); double levinvtrgamma(double limit, double shape1, double shape2, double scale, double order, int give_log); double dtrgamma(double x, double shape1, double shape2, double scale, int give_log); double ptrgamma(double q, double shape1, double shape2, double scale, int lower_tail, int log_p); double qtrgamma(double p, double shape1, double shape2, double scale, int lower_tail, int log_p); double rtrgamma(double shape1, double shape2, double scale); double mtrgamma(double order, double shape1, double shape2, double scale, int give_log); double levtrgamma(double limit, double shape1, double shape2, double scale, double order, int give_log); double dpareto2(double x, double min, double shape, double scale, int give_log); double ppareto2(double q, double min, double shape, double scale, int lower_tail, int log_p); double qpareto2(double p, double min, double shape, double scale, int lower_tail, int log_p); double rpareto2(double min, double shape, double scale); double mpareto2(double order, double min, double shape, double scale, int give_log); double levpareto2(double limit, double min, double shape, double scale, double order, int give_log); double dpareto3(double x, double min, double shape, double scale, int give_log); double ppareto3(double q, double min, double shape, double scale, int lower_tail, int log_p); double qpareto3(double p, double min, double shape, double scale, int lower_tail, int log_p); double rpareto3(double min, double shape, double scale); 
double mpareto3(double order, double min, double shape, double scale, int give_log); double levpareto3(double limit, double min, double shape, double scale, double order, int give_log); double dzmnbinom(double x, double size, double prob, double p0m, int give_log); double pzmnbinom(double q, double size, double prob, double p0m, int lower_tail, int log_p); double qzmnbinom(double p, double size, double prob, double p0m, int lower_tail, int log_p); double rzmnbinom(double size, double prob, double p0m); double dzmbinom(double x, double size, double prob, double p0m, int give_log); double pzmbinom(double q, double size, double prob, double p0m, int lower_tail, int log_p); double qzmbinom(double p, double size, double prob, double p0m, int lower_tail, int log_p); double rzmbinom(double size, double prob, double p0m); /* Four parameter distributions */ double dgenbeta(double x, double shape1, double shape2, double shape3, double scale, int give_log); double pgenbeta(double q, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p); double qgenbeta(double p, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p); double rgenbeta(double shape1, double shape2, double shape3, double scale); double mgenbeta(double order, double shape1, double shape2, double shape3, double scale, int give_log); double levgenbeta(double limit, double shape1, double shape2, double shape3, double scale, double order, int give_log); double dtrbeta(double x, double shape1, double shape2, double shape3, double scale, int give_log); double ptrbeta(double q, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p); double qtrbeta(double p, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p); double rtrbeta(double shape1, double shape2, double shape3, double scale); double mtrbeta(double order, double shape1, double shape2, double shape3, double scale, int give_log); double 
levtrbeta(double limit, double shape1, double shape2, double shape3, double scale, double order, int give_log); double dpareto4(double x, double min, double shape1, double shape2, double scale, int give_log); double ppareto4(double q, double min, double shape1, double shape2, double scale, int lower_tail, int log_p); double qpareto4(double p, double min, double shape1, double shape2, double scale, int lower_tail, int log_p); double rpareto4(double min, double shape1, double shape2, double scale); double mpareto4(double order, double min, double shape1, double shape2, double scale, int give_log); double levpareto4(double limit, double min, double shape1, double shape2, double scale, double order, int give_log); /* Five parameter distributions */ double dfpareto(double x, double min, double shape1, double shape2, double shape3, double scale, int give_log); double pfpareto(double q, double min, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p); double qfpareto(double p, double min, double shape1, double shape2, double shape3, double scale, int lower_tail, int log_p); double rfpareto(double min, double shape1, double shape2, double shape3, double scale); double mfpareto(double order, double min, double shape1, double shape2, double shape3, double scale, int give_log); double levfpareto(double limit, double mu, double shape1, double shape2, double shape3, double scale, double order, int give_log); /* Phase-type distributions */ double dphtype(double x, double *pi, double *T, int m, int give_log); double pphtype(double x, double *pi, double *T, int m, int lower_tail, int log_p); double rphtype(double *pi, double **Q, double *rates, int m); double mphtype(double order, double *pi, double *T, int m, int give_log); double mgfphtype(double x, double *pi, double *T, int m, int give_log); #ifdef __cplusplus } #endif actuar/inst/CITATION0000644000176200001440000000205715147745722013665 0ustar liggesusersp1 <- person("Christophe", "Dutang", 
email = "dutang@ceremade.dauphine.fr") p2 <- person("Vincent", "Goulet", email = "vincent.goulet@act.ulaval.ca") p3 <- person("Nicholas", "Langevin") p4 <- person("Mathieu", "Pigeon", email = "pigeon.mathieu.2@uqam.ca") bibentry(bibtype = "Article", title = "actuar: An {R} Package for Actuarial Science", author = c(p1, p2, p4), journal = "Journal of Statistical Software", year = "2008", volume = "25", number = "7", pages = "1--37", doi = "10.18637/jss.v025.i07", header = "To cite actuar in publications use:") bibentry(bibtype = "Article", title = "{F}eller-{P}areto and Related Distributions: Numerical Implementation and Actuarial Applications", author = c(p1, p2, p3), journal = "Journal of Statistical Software", year = "2022", volume = "103", number = "6", pages = "1--22", doi = "10.18637/jss.v103.i06", header = "For the implementation of the Feller-Pareto family of distributions, use:") actuar/inst/po/0000755000176200001440000000000015147745722013142 5ustar liggesusersactuar/inst/po/fr/0000755000176200001440000000000015147745722013551 5ustar liggesusersactuar/inst/po/fr/LC_MESSAGES/0000755000176200001440000000000015147745722015336 5ustar liggesusersactuar/inst/po/fr/LC_MESSAGES/R-actuar.mo0000644000176200001440000002337315147745722017361 0ustar liggesusersy8 9 )? i + ) 4 ; ,P } 4 0 $  @ T !h 8 B ( / P 0k   F    #'&)N<x&=A"-d4 ,? Mgv27LS&W%~A %8<XI  0Oi o} 059Yms v&   ',2)9"c G/ 4U^}6U[>a2+9?X7<--)[')=K@+#835U9 170GC4YT"?w8 5 D?6 N m    - " F! 
M!W!1`!A!K!Z " {" """""""/""# #(,#%U#4{#8#;#%$?$E$ H$S$\$*{$$$ $ $$ $ %%% %#=%a%g%n% t%~%%%%J%5%&8&$A&f&;&&&&&&R3*fY8 \Sp/v V_so&uKX@:WbJIq9M=(E#n>A<leBd5^+yG0)Cg$4"ZQFU,t7H amON%x-jk Lc!wDP' ;.1?T]r[2`6ih!freq%s has many elements: only the first used%s ignored when %s is specified%s is an alias for %s, however they differ.%s measure requires an object of class %s%s must be a function or an expression containing %s%s must be a function or an expression containing %s and %s%s must be a function when using reinsurance%s must be a named list%s must be a numeric vector or an object of class %s%s must be a valid probability (between 0 and 1)%s must be a vector of probabilities%s must be a vector or a matrix%s must be positive%s must be supplied%s must be supplied as a function%s must supply the mean and variance of the distribution%s must supply the mean, variance and skewness of the distribution%s must supply the number of simulations%s not used when %s is specified%s required with method %s%s specifies names which are not arguments to %s,LASPr[S = 0] is numerically equal to 0; impossible to start the recursionbreaksbycdfchi-squarecoinsurance must be between 0 and 1coverage modifications must be positivedeductible must be smaller than the limitempty regression model; fitting with Buhlmann-Straub's modelexpressions in %s and %s must be namedformulafrequency distribution must be supplied as a character stringfrequency distribution not in the (a, b, 0) or (a, b, 1) familiesfrequency must be larger than 0 in all groupsfunfunction not defined for approximating distributionsgroupgrouped.datahhierarchical regression models not supportedimpossible to replace boundaries and frequencies simultaneouslyinfinite group boundariesinternal errorinvalid %s specificationinvalid first argument %sinvalid level nameinvalid level numberinvalid number of group boundaries and frequenciesinvalid parameters in %sinvalid third argument %sinvalid values in %slambdalevlevel names different in %s, 
%s and %slower bound of the likelihood missingmaximum number of iterations reached before obtaining convergencemgf.claimmgf.waitmissing frequencies replaced by zerosmissing ratios not allowed when weights are not suppliedmissing values are not in the same positions in %s and in %smissing values are not in the same positions in 'weights' and in 'ratios'model.freqmodel.sevmodelsmomentsnnb.simulnclassneed 0, 1, or 2 subscriptsno available data to fit modelno positive probabilitiesnodesnothing to doone of %s or %s is neededone of %s or %s must be non-NULLone of the Beta prior parameter %s or %s missingone of the Gamma prior parameter %s, %s or %s missingonly logical matrix subscripts are allowed in replacementoptimization failedorderp0par.claimspar.waitparameter %s missing in %sparameter %s of the likelihood missingparameter %s or %s missing in %sparameters %s missing in %spremium.rateprobprobabilityrateratesratiosratios have to be supplied if weights arerows extracted in increasing orderscalesd.likshapeshape.likshape1shape2sizestartthere must be at least one node with more than one period of experiencethere must be at least two nodes at every levelthere must be more than one nodeunbiasedunsupported interactions in %sunsupported likelihoodvalue of %s ignored with a zero-truncated distributionvalue of %s missingvalue of %s or %s missingweightsxyProject-Id-Version: actuar 2.0-0 Report-Msgid-Bugs-To: bugs@r-project.org PO-Revision-Date: 2023-11-07 14:45-0500 Last-Translator: Vincent Goulet Language-Team: Vincent Goulet Language: fr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit !freq%s contient plusieurs éléments: seul le premier est utilisé%s ignoré quand %s est fourni%s est un alias pour %s, cependant ils diffèrent.la mesure %s requiert un objet de classe %s%s doit être une fonction ou une expression contenant %s%s doit être une fonction ou une expression contenant %s et %s%s doit être une fonction en présence de réassurance%s 
doit être une liste nommée%s doit être un vecteur numérique ou un objet de classe %s%s doit être une probabilité (entre 0 et 1)%s doit être un vecteur de probabilités%s doit être un vecteur ou une matrice%s doit être positif%s doit être fourni%s doit être fourni en tant que fonction%s doit contenir la moyenne et la variance de la distribution%s doit contenir la moyenne, la variance et l'asymétrie de la distribution%s doit spécifier le nombre de simulations%s non utilisé quand %s est fourni%s requis pour la méthode %s%s contient des noms qui ne sont pas des arguments de %s,LASvaleur de Pr[S = 0] numériquement nulle; impossible de démarrer le calcul récursifbreaksbycdfchi-squarele facteur de coassurance doit être entre 0 et 1les modifications de couverture doivent être positivesla franchise doit être inférieure à la limitemodèle de régression vide; utilisation du modèle de Bühlmann-Straubles expressions dans %s et %s doivent être nomméesformulala distribution de fréquence doit être spécifiée sous forme de chaîne de caractèresla distribution de fréquence ne fait pas partie des familles (a, b, 0) ou (a, b, 1)la fréquence doit être supérieure à 0 dans tous les groupesfunfonction non définie pour les méthodes d'approximationgroupgrouped.datahmodèles de régression hiérarchiques non supportésimpossible de remplacer simultanément les bornes et les fréquencesbornes de groupe infinieserreur internevaleur de %s incorrectepremier argument %s incorrectnom de niveau incorrectnuméro de niveau incorrectnombre de bornes de groupe et de fréquences incorrectparamètres incorrects dans %stroisième argument %s incorrectvaleurs incorrectes dans %slambdalevnoms de niveaux différents dans %s, %s et %sseuil de la vraisemblance manquantnombre d'itérations maximal atteint avant obtention de la convergencemgf.claimmgf.waitfréquences manquantes remplacées par des zérosratios manquants non permis lorsque les poids ne sont pas fournisles données manquantes ne sont pas aux mêmes positions dans %s 
et dans %sles données manquantes ne sont pas aux mêmes positions dans les poids et dans les ratiosmodel.freqmodel.sevmodelsmomentsnnb.simulnclassil faut 0, 1 ou 2 indicesaucune donnée disponible pour la modélisationaucune probabilité positivenodesrien à fairel'une ou l'autre de %s ou %s est requiseun de %s ou %s doit ne pas être NULLun des paramètres %s ou %s de la loi Bêta manquantun des paramètres %s, %s ou %s de la loi Gamma manquantseuls les indices logiques sont permis pour le remplacementl'optimisation a échouéorderp0par.claimspar.waitparamètre %s manquant dans %sparamètre %s de la vraisemblance manquantparamètre %s ou %s manquant dans %sparamètres %s manquants dans %spremium.rateprobprobabilityrateratesratiosratios requis s'il y a des poidslignes extraites en ordre croissantscalesd.likshapeshape.likshape1shape2sizestartil y doit y avoir au moins un noeud avec plus d'une période d'expérienceil doit y avoir au moins deux noeuds à chaque niveauil doit y avoir plus d'un noeudunbiasedinteractions non supportées dans %svraisemblance non validevaleur de %s ignorée pour une distribution zéro tronquéevaleur de %s manquantevaleur de %s ou %s manquanteweightsxyactuar/inst/po/fr/LC_MESSAGES/actuar.mo0000644000176200001440000000727315147745722017163 0ustar liggesusers!$/, /:'8b++0 $51g z     &@"g#####(>)g/AUko/ 1J P| W 9% 9_ A  F 3 "M "p " " " " ( $H %m % % % % *+ +V  5 F G\|&     !'A' is 0-diml'order' (%.2f) must be integer, rounded to %.0fLAPACK routine dgebal returned info code %d when permutingLAPACK routine dgebal returned info code %d when scalingLAPACK routine dgetrf returned info code %dLAPACK routine dgetrs returned info code %dLapack routine dgesv: system is exactly singularNAs producedargument %d of Lapack routine dgesv had invalid valueintegration failedinternal error in actuar_do_dpq1internal error in actuar_do_dpq2internal error in actuar_do_dpq3internal error in actuar_do_dpq4internal error in actuar_do_dpq5internal error in actuar_do_dpq6internal error in 
actuar_do_dpqphtype2internal error in actuar_do_randominternal error in actuar_do_random1internal error in actuar_do_random2internal error in actuar_do_random3internal error in actuar_do_random4internal error in actuar_do_random5internal error in actuar_do_randomphtypeinternal error in actuar_do_randomphtype2invalid argumentsmaximum number of iterations must be at least 1maximum number of iterations reached before obtaining convergencemaximum number of recursions reached before the probability distribution was completeno right-hand side in 'B'non-conformable argumentsnon-square sub-intensity matrixProject-Id-Version: actuar 1.1-7 Report-Msgid-Bugs-To: PO-Revision-Date: 2020-06-03 12:33-0400 Last-Translator: Vincent Goulet Language-Team: Vincent Goulet Language: fr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n > 1); 'A' est de dimension nulle'order' (%.2f) doit être entier, arrondi à %.0fla procédure LAPACK dgebal a produit le code d'erreur %d lors de la permutationla procédure LAPACK dgebal a produit le code d'erreur %d lors de la mise à l'échellela procédure LAPACK dgetrf a produit le code d'erreur %dla procédure LAPACK dgetrs a produit le code d'erreur %dsous-programme Lapack dgesv: le système est exactement singulierproduction de NAvaleur incorrecte pour l'argument %d du sous-programme dgesv de Lapackl'intégration a échouéerreur interne dans actuar_do_dpq1erreur interne dans actuar_do_dpq2erreur interne dans actuar_do_dpq3erreur interne dans actuar_do_dpq4erreur interne dans actuar_do_dpq5erreur interne dans actuar_do_dpq6erreur interne dans actuar_do_dpqphtype2erreur interne dans actuar_do_randomerreur interne dans actuar_do_random1erreur interne dans actuar_do_random2erreur interne dans actuar_do_random3erreur interne dans actuar_do_random4erreur interne dans actuar_do_random5erreur interne dans actuar_do_randomphtypeerreur interne dans actuar_do_randomphtype2arguments incorrectsle 
nombre d'itérations maximal doit être au moins 1nombre d'itérations maximal atteint avant obtention de la convergencenombre de récursions maximal atteint avant obtention de la convergenceaucun membre de droite dans 'B'arguments non conformesmatrice de sous-intensité non carréeactuar/inst/po/it/0000755000176200001440000000000015147745722013556 5ustar liggesusersactuar/inst/po/it/LC_MESSAGES/0000755000176200001440000000000015147745722015343 5ustar liggesusersactuar/inst/po/it/LC_MESSAGES/R-actuar.mo0000644000176200001440000002313515147745722017362 0ustar liggesusersy8 9 )? i + ) 4 ; ,P } 4 0 $  @ T !h 8 B ( / P 0k   F    #'&)N<x&=A"-d4 ,? Mgv27LS&W%~A %8<XI  0Oi o} 059Yms v&   ',2)9"c G/ 4U^}6U[1a!0-9>N@ ;1+(]/': HF)./68M<  2-B%0hNC:4o4s 0B1N]}2  9 Q X &\ 1 A !& !B1!At!N! " ""!")"+"4";"2V"""""""1"6*#=a#### ###,# $=$ Y$f$ k$w$|$$/$"$$$$ $$%% %?%.R%%% %%9% &6&Q&Y&[&R3*fY8 \Sp/v V_so&uKX@:WbJIq9M=(E#n>A<leBd5^+yG0)Cg$4"ZQFU,t7H amON%x-jk Lc!wDP' ;.1?T]r[2`6ih!freq%s has many elements: only the first used%s ignored when %s is specified%s is an alias for %s, however they differ.%s measure requires an object of class %s%s must be a function or an expression containing %s%s must be a function or an expression containing %s and %s%s must be a function when using reinsurance%s must be a named list%s must be a numeric vector or an object of class %s%s must be a valid probability (between 0 and 1)%s must be a vector of probabilities%s must be a vector or a matrix%s must be positive%s must be supplied%s must be supplied as a function%s must supply the mean and variance of the distribution%s must supply the mean, variance and skewness of the distribution%s must supply the number of simulations%s not used when %s is specified%s required with method %s%s specifies names which are not arguments to %s,LASPr[S = 0] is numerically equal to 0; impossible to start the recursionbreaksbycdfchi-squarecoinsurance must be between 0 and 1coverage modifications must be positivedeductible must 
be smaller than the limitempty regression model; fitting with Buhlmann-Straub's modelexpressions in %s and %s must be namedformulafrequency distribution must be supplied as a character stringfrequency distribution not in the (a, b, 0) or (a, b, 1) familiesfrequency must be larger than 0 in all groupsfunfunction not defined for approximating distributionsgroupgrouped.datahhierarchical regression models not supportedimpossible to replace boundaries and frequencies simultaneouslyinfinite group boundariesinternal errorinvalid %s specificationinvalid first argument %sinvalid level nameinvalid level numberinvalid number of group boundaries and frequenciesinvalid parameters in %sinvalid third argument %sinvalid values in %slambdalevlevel names different in %s, %s and %slower bound of the likelihood missingmaximum number of iterations reached before obtaining convergencemgf.claimmgf.waitmissing frequencies replaced by zerosmissing ratios not allowed when weights are not suppliedmissing values are not in the same positions in %s and in %smissing values are not in the same positions in 'weights' and in 'ratios'model.freqmodel.sevmodelsmomentsnnb.simulnclassneed 0, 1, or 2 subscriptsno available data to fit modelno positive probabilitiesnodesnothing to doone of %s or %s is neededone of %s or %s must be non-NULLone of the Beta prior parameter %s or %s missingone of the Gamma prior parameter %s, %s or %s missingonly logical matrix subscripts are allowed in replacementoptimization failedorderp0par.claimspar.waitparameter %s missing in %sparameter %s of the likelihood missingparameter %s or %s missing in %sparameters %s missing in %spremium.rateprobprobabilityrateratesratiosratios have to be supplied if weights arerows extracted in increasing orderscalesd.likshapeshape.likshape1shape2sizestartthere must be at least one node with more than one period of experiencethere must be at least two nodes at every levelthere must be more than one nodeunbiasedunsupported interactions in 
%sunsupported likelihoodvalue of %s ignored with a zero-truncated distributionvalue of %s missingvalue of %s or %s missingweightsxyProject-Id-Version: actuar 2.0-0 Report-Msgid-Bugs-To: bugs@r-project.org PO-Revision-Date: 2023-11-07 14:46-0500 Last-Translator: Daniele Medri Language-Team: Daniele Medri Language: it MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit X-Generator: Poedit 2.4.2 !freq%s ha molti elementi: solo il primo è utilizzato%s ignorato quando %s è presente%s è un alisa per %s, comunque sono differenti.la misura %s richiede un oggetto di classe %s%s dev'essere una funzione o un'espressione contenente %s%s dev'essere una funzione o un'espressione contenente %s e %s%s dev'essere una funzione quando si utilizza la riassicurazione%s dev'essere una lista nominata%s dev'essere un vettore numerico o un oggetto di classe %s%s dev'essere una probabilità valida (tra 0 e 1)%s dev'essere un vettore di probabilità%s dev'essere un vettore numerico o una matrice%s dev'essere positivo%s dev'essere passata%s dev'essere passata come una funzione%s deve fornire la media e la varianza della distribuzione%s deve fornire la media, la varianza e l'asimmetria della distribuzione%s deve indicare il numero di simulazioni%s non viene usata quando viene specificato %s%s richiesto con il metodo %s%s specifica nomi che non sono argomenti per %s,LASPr[S = 0] è numericamente uguale a 0; non è possibile avviare la ricorsionebreaksbycdfchi-squarecoinsurance dev'essere tra 0 e 1le modifiche alla copertura devono essere positivedeductible dev'essere più piccolo del limitemodello di regressione vuoto; stima con il modello Buhlmann-Strauble espressioni in %s e %s devono essere indicateformulala distribuzione di frequenza devono essere passate come una stringa caratteredistribuzione di frequenza non nelle famiglie (a, b, 0) o (a, b, 1)la frequenza dev'essere più grande di 0 in tutti i gruppifunfunzione non definita per approssimare 
distribuzionigroupgrouped.datahmodelli di regressione gerarchica non supportatinon è possibile sostituire estremi e frequenze contemporaneamenteestremi di gruppo non finitierrore internospecificazione di %s non validaprimo argomento %s non validonome livello non validonumero livello non validonumero di estremi di gruppo e frequenze non validoparametri non validi in %sterzo argomento %s non validovalori non validi in %slambdalevnomi livello differenti in %s, %s e %sestremo inferiore mancante per la verosimiglianzaraggiunto il numero massimo di iterazioni prima della convergenzamgf.claimmgf.waitfrequenze mancanti sostituite con zeronon sono ammessi rapporti mancanti quando i pesi non sono indicatii valori mancanti non sono nelle medesime posizioni in %s e in %si valori mancanti non sono nelle medesime posizioni in 'weights' e in 'ratios'model.freqmodel.sevmodelsmomentsnnb.simulnclassrichiede 0, 1 o due indicinon ci sono abbastanza dati per stimare il modellonessuna probabilità positivanodesniente da farerichiesto uno di %s o %suno di %s o %s non dev'essere NULLmanca uno dei parametri Beta a priori tra %s o %smanca uno dei parametri Gamma a priori tra %s, %s o %sin sostituzione sono consentiti solo pedici di matrice logicaottimizzazione fallitaorderp0par.claimspar.waitparametri %s mancanti in %sparametro %s mancante per la verosimiglianzaparametri %s o %s mancanti in %sparametri %s mancanti in %spremium.rateprobprobabilityrateratesratiosi rapporti devono essere passati se i pesi sonorighe estratte in ordine crescentescalesd.likshapeshape.likshape1shape2sizestartdev'esserci almeno un nodo con più di un periodo di esperienzadevono esserci almeno due nodi in ogni livellodev'esserci più di un nodounbiasedinterazioni non supportate in %sverosimiglianza non supportatavalore di %s ignorato con una distribuzione troncata zerovalore di %s mancantevalore di %s o %s mancanteweightsxyactuar/inst/po/it/LC_MESSAGES/actuar.mo0000644000176200001440000000727215147745722017167 0ustar 
liggesusers!$/, /:'8b++0 $51g z     &@"g#####(>)g/AUkq 1 7? Zw U B( Bk ?  D F [ | &! "H #k # # # # ( )H r 3 A e c"     !'A' is 0-diml'order' (%.2f) must be integer, rounded to %.0fLAPACK routine dgebal returned info code %d when permutingLAPACK routine dgebal returned info code %d when scalingLAPACK routine dgetrf returned info code %dLAPACK routine dgetrs returned info code %dLapack routine dgesv: system is exactly singularNAs producedargument %d of Lapack routine dgesv had invalid valueintegration failedinternal error in actuar_do_dpq1internal error in actuar_do_dpq2internal error in actuar_do_dpq3internal error in actuar_do_dpq4internal error in actuar_do_dpq5internal error in actuar_do_dpq6internal error in actuar_do_dpqphtype2internal error in actuar_do_randominternal error in actuar_do_random1internal error in actuar_do_random2internal error in actuar_do_random3internal error in actuar_do_random4internal error in actuar_do_random5internal error in actuar_do_randomphtypeinternal error in actuar_do_randomphtype2invalid argumentsmaximum number of iterations must be at least 1maximum number of iterations reached before obtaining convergencemaximum number of recursions reached before the probability distribution was completeno right-hand side in 'B'non-conformable argumentsnon-square sub-intensity matrixProject-Id-Version: actuar 1.1-7 Report-Msgid-Bugs-To: PO-Revision-Date: 2022-04-13 11:12+0200 Last-Translator: Daniele Medri Language-Team: Daniele Medri Language: it_IT MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n != 1); X-Generator: Poedit 2.4.2 'A' è 0-diml'order' (%.2f) dev'essere un intero, arrotondato a %.0fLa routine dgebal di LAPACK ha restituito il codice informativo %d durante la permutazioneLa routine dgebal di LAPACK ha restituito il codice informativo %d durante lo scalingLa routine dgetrf di LAPACK ha restituito il codice informativo %dLa routine dgetrs di LAPACK ha 
restituito il codice informativo %dLa routine dgesv di Lapack: il sistema è esattamente singolareGenerati valori NAl'argomento %d della routine dgesv di Lapack ha un valore non validointegrazione fallitaerrore interno in actuar_do_dpq1errore interno in actuar_do_dpq2errore interno in actuar_do_dpq3errore interno in actuar_do_dpq4errore interno in actuar_do_dpq5errore interno in actuar_do_dpq6errore interno in actuar_do_dpqphtype2errore interno in actuar_do_randomerrore interno in actuar_do_random1errore interno in actuar_do_random2errore interno in actuar_do_random3errore interno in actuar_do_random4errore interno in actuar_do_random5errore interno in actuar_do_randomphtypeerrore interno in actuar_do_randomphtype2argomenti non validiil numero massimo di iterazioni dev'essere almeno 1raggiunto il numero massimo di iterazioni prima della convergenzaraggiunto il numero massimo di ricorsioni prima che la distribuzione di probabilità fosse completatanessun membro di destra in 'B'gli argomenti non sono compatibilimatrice non quadrataactuar/inst/po/en@quot/0000755000176200001440000000000015147745722014555 5ustar liggesusersactuar/inst/po/en@quot/LC_MESSAGES/0000755000176200001440000000000015147745722016342 5ustar liggesusersactuar/inst/po/en@quot/LC_MESSAGES/R-actuar.mo0000644000176200001440000002235515147745722020364 0ustar liggesusersy8 9 )? i + ) 4 ; ,P } 4 0 $  @ T !h 8 B ( / P 0k   F    #'&)N<x&=A"-d4 ,? Mgv27LS&W%~A %8<XI  0Oi o} 059Yms v&   ',2)9"c G/ 4U^}6)I+i)4;,0]4u0$ 4!H8jB( 00K|~F #').<X&=A-Dr4v ,?-GVo2,3&7%^A %8<8 Qu       !7!Q! W!e! 
!0!5!9"A"U"[" ^"i"r"&" "" "" ####)!#"K#n#t#{# #####G#/# $=$F$e$6|$$$$$$R3*fY8 \Sp/v V_so&uKX@:WbJIq9M=(E#n>A<leBd5^+yG0)Cg$4"ZQFU,t7H amON%x-jk Lc!wDP' ;.1?T]r[2`6ih!freq%s has many elements: only the first used%s ignored when %s is specified%s is an alias for %s, however they differ.%s measure requires an object of class %s%s must be a function or an expression containing %s%s must be a function or an expression containing %s and %s%s must be a function when using reinsurance%s must be a named list%s must be a numeric vector or an object of class %s%s must be a valid probability (between 0 and 1)%s must be a vector of probabilities%s must be a vector or a matrix%s must be positive%s must be supplied%s must be supplied as a function%s must supply the mean and variance of the distribution%s must supply the mean, variance and skewness of the distribution%s must supply the number of simulations%s not used when %s is specified%s required with method %s%s specifies names which are not arguments to %s,LASPr[S = 0] is numerically equal to 0; impossible to start the recursionbreaksbycdfchi-squarecoinsurance must be between 0 and 1coverage modifications must be positivedeductible must be smaller than the limitempty regression model; fitting with Buhlmann-Straub's modelexpressions in %s and %s must be namedformulafrequency distribution must be supplied as a character stringfrequency distribution not in the (a, b, 0) or (a, b, 1) familiesfrequency must be larger than 0 in all groupsfunfunction not defined for approximating distributionsgroupgrouped.datahhierarchical regression models not supportedimpossible to replace boundaries and frequencies simultaneouslyinfinite group boundariesinternal errorinvalid %s specificationinvalid first argument %sinvalid level nameinvalid level numberinvalid number of group boundaries and frequenciesinvalid parameters in %sinvalid third argument %sinvalid values in %slambdalevlevel names different in %s, %s and %slower bound of the likelihood 
missingmaximum number of iterations reached before obtaining convergencemgf.claimmgf.waitmissing frequencies replaced by zerosmissing ratios not allowed when weights are not suppliedmissing values are not in the same positions in %s and in %smissing values are not in the same positions in 'weights' and in 'ratios'model.freqmodel.sevmodelsmomentsnnb.simulnclassneed 0, 1, or 2 subscriptsno available data to fit modelno positive probabilitiesnodesnothing to doone of %s or %s is neededone of %s or %s must be non-NULLone of the Beta prior parameter %s or %s missingone of the Gamma prior parameter %s, %s or %s missingonly logical matrix subscripts are allowed in replacementoptimization failedorderp0par.claimspar.waitparameter %s missing in %sparameter %s of the likelihood missingparameter %s or %s missing in %sparameters %s missing in %spremium.rateprobprobabilityrateratesratiosratios have to be supplied if weights arerows extracted in increasing orderscalesd.likshapeshape.likshape1shape2sizestartthere must be at least one node with more than one period of experiencethere must be at least two nodes at every levelthere must be more than one nodeunbiasedunsupported interactions in %sunsupported likelihoodvalue of %s ignored with a zero-truncated distributionvalue of %s missingvalue of %s or %s missingweightsxyProject-Id-Version: actuar 3.3-4 PO-Revision-Date: 2023-11-07 14:41 Last-Translator: Automatically generated Language-Team: none Language: en MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n != 1); !freq%s has many elements: only the first used%s ignored when %s is specified%s is an alias for %s, however they differ.%s measure requires an object of class %s%s must be a function or an expression containing %s%s must be a function or an expression containing %s and %s%s must be a function when using reinsurance%s must be a named list%s must be a numeric vector or an object of class %s%s must be 
a valid probability (between 0 and 1)%s must be a vector of probabilities%s must be a vector or a matrix%s must be positive%s must be supplied%s must be supplied as a function%s must supply the mean and variance of the distribution%s must supply the mean, variance and skewness of the distribution%s must supply the number of simulations%s not used when %s is specified%s required with method %s%s specifies names which are not arguments to %s,LASPr[S = 0] is numerically equal to 0; impossible to start the recursionbreaksbycdfchi-squarecoinsurance must be between 0 and 1coverage modifications must be positivedeductible must be smaller than the limitempty regression model; fitting with Buhlmann-Straub's modelexpressions in %s and %s must be namedformulafrequency distribution must be supplied as a character stringfrequency distribution not in the (a, b, 0) or (a, b, 1) familiesfrequency must be larger than 0 in all groupsfunfunction not defined for approximating distributionsgroupgrouped.datahhierarchical regression models not supportedimpossible to replace boundaries and frequencies simultaneouslyinfinite group boundariesinternal errorinvalid %s specificationinvalid first argument %sinvalid level nameinvalid level numberinvalid number of group boundaries and frequenciesinvalid parameters in %sinvalid third argument %sinvalid values in %slambdalevlevel names different in %s, %s and %slower bound of the likelihood missingmaximum number of iterations reached before obtaining convergencemgf.claimmgf.waitmissing frequencies replaced by zerosmissing ratios not allowed when weights are not suppliedmissing values are not in the same positions in %s and in %smissing values are not in the same positions in ‘weights’ and in ‘ratios’model.freqmodel.sevmodelsmomentsnnb.simulnclassneed 0, 1, or 2 subscriptsno available data to fit modelno positive probabilitiesnodesnothing to doone of %s or %s is neededone of %s or %s must be non-NULLone of the Beta prior parameter %s or %s 
missingone of the Gamma prior parameter %s, %s or %s missingonly logical matrix subscripts are allowed in replacementoptimization failedorderp0par.claimspar.waitparameter %s missing in %sparameter %s of the likelihood missingparameter %s or %s missing in %sparameters %s missing in %spremium.rateprobprobabilityrateratesratiosratios have to be supplied if weights arerows extracted in increasing orderscalesd.likshapeshape.likshape1shape2sizestartthere must be at least one node with more than one period of experiencethere must be at least two nodes at every levelthere must be more than one nodeunbiasedunsupported interactions in %sunsupported likelihoodvalue of %s ignored with a zero-truncated distributionvalue of %s missingvalue of %s or %s missingweightsxyactuar/inst/po/en@quot/LC_MESSAGES/actuar.mo0000644000176200001440000000672115147745722020164 0ustar liggesusers!$/, /:'8b++0 $51g z     &@"g#####(>)g/AUk/3 :5 8p + + 0 2 5? u - &N "u # # # # #( (L )u  / A U# y        !'A' is 0-diml'order' (%.2f) must be integer, rounded to %.0fLAPACK routine dgebal returned info code %d when permutingLAPACK routine dgebal returned info code %d when scalingLAPACK routine dgetrf returned info code %dLAPACK routine dgetrs returned info code %dLapack routine dgesv: system is exactly singularNAs producedargument %d of Lapack routine dgesv had invalid valueintegration failedinternal error in actuar_do_dpq1internal error in actuar_do_dpq2internal error in actuar_do_dpq3internal error in actuar_do_dpq4internal error in actuar_do_dpq5internal error in actuar_do_dpq6internal error in actuar_do_dpqphtype2internal error in actuar_do_randominternal error in actuar_do_random1internal error in actuar_do_random2internal error in actuar_do_random3internal error in actuar_do_random4internal error in actuar_do_random5internal error in actuar_do_randomphtypeinternal error in actuar_do_randomphtype2invalid argumentsmaximum number of iterations must be at least 1maximum number of iterations reached 
before obtaining convergencemaximum number of recursions reached before the probability distribution was completeno right-hand side in 'B'non-conformable argumentsnon-square sub-intensity matrixProject-Id-Version: actuar 3.3-4 Report-Msgid-Bugs-To: PO-Revision-Date: 2023-11-07 14:41-0500 Last-Translator: Automatically generated Language-Team: none Language: en MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n != 1); ‘A’ is 0-diml‘order’ (%.2f) must be integer, rounded to %.0fLAPACK routine dgebal returned info code %d when permutingLAPACK routine dgebal returned info code %d when scalingLAPACK routine dgetrf returned info code %dLAPACK routine dgetrs returned info code %dLapack routine dgesv: system is exactly singularNAs producedargument %d of Lapack routine dgesv had invalid valueintegration failedinternal error in actuar_do_dpq1internal error in actuar_do_dpq2internal error in actuar_do_dpq3internal error in actuar_do_dpq4internal error in actuar_do_dpq5internal error in actuar_do_dpq6internal error in actuar_do_dpqphtype2internal error in actuar_do_randominternal error in actuar_do_random1internal error in actuar_do_random2internal error in actuar_do_random3internal error in actuar_do_random4internal error in actuar_do_random5internal error in actuar_do_randomphtypeinternal error in actuar_do_randomphtype2invalid argumentsmaximum number of iterations must be at least 1maximum number of iterations reached before obtaining convergencemaximum number of recursions reached before the probability distribution was completeno right-hand side in ‘B’non-conformable argumentsnon-square sub-intensity matrixactuar/inst/NEWS.0.Rd0000644000176200001440000004646215147745722013741 0ustar liggesusers\name{NEWS} \title{actuar News} \encoding{UTF-8} \section{LATER NEWS}{ This file covers NEWS for the 0.x series. News for \pkg{actuar} 1.0-0 and later can be found in file \file{NEWS.1.Rd}. 
} \section{CHANGES IN VERSION 0.9-7}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{plot} method for function objects returned by \code{ruin()}. } } \subsection{BUG FIXES}{ \itemize{ \item Calculation of the Bühlmann-Gisler and Ohlsson estimators was incorrect for hierarchical models with more than one level. \item Better display of first column for grouped data objects. \item Miscellaneous corrections to the vignettes. } } } \section{CHANGES IN VERSION 0.9-6}{ \itemize{ \item Accented letters in comments removed to avoid compilation problems under MacOS X on CRAN (see thread starting at \url{https://stat.ethz.ch/pipermail/r-devel/2008-February/048391.html}). } } \section{CHANGES IN VERSION 0.9-5}{ \subsection{NEW FEATURES}{ \itemize{ \item New \code{simulation} vignette on usage of function \code{simul()}. Most of the material was previously in the \code{credibility} vignette. \item Examples of \code{ruin()} and \code{adjCoef()} added to the \code{risk} demo. } } \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item Following some negative comments on a function name VG had been using for years, function \code{simpf()} is renamed to \code{simul()} and the class of the output from \code{simpf} to \code{portfolio}. \item The components of the list returned by \code{severity.portfolio()} are renamed from \code{"first"} and \code{"last"} to \code{"main"} and \code{"split"}, respectively. } } \subsection{BUG FIXES}{ \itemize{ \item \code{levinvgauss()} returned wrong results. \item Restructuring of the weights matrix in \code{simpf()} may fail with an incorrect number of columns. \item Fixed index entry of the credibility theory vignette. \item \code{adjCoef()} would only accept as argument \code{h} a function named \code{h}. \item \code{ruin()} built incorrect probability vector and intensity matrix for mixture of Erlangs. \item \code{CTE.aggregateDist()} sometimes gave values smaller than the VaR for recursive and simulation methods. 
} } } \section{CHANGES IN VERSION 0.9-4}{ \itemize{ \item Maintenance and new features release. } \subsection{NEW FEATURES -- LOSS DISTRIBUTIONS}{ \itemize{ \item Functions \code{mgffoo()} to compute the moment (or cumulant if \code{log = TRUE}) generating function of the following distributions: chi-square, exponential, gamma, inverse gaussian (from package \pkg{SuppDists}), inverse gamma, normal, uniform and phase-type (see below). \item Functions \code{mfoo()} to compute the raw moments of all the probability distributions supported in the package and the following of base R: chi-square, exponential, gamma, inverse gaussian (from package \pkg{SuppDists}), inverse gamma, normal, uniform. \item Functions \code{phtype()} to compute the probability density function, cumulative distribution function, moment generating function, raw moments of, and to generate variates from, phase-type distributions. } } \subsection{NEW FEATURES -- RISK THEORY}{ \itemize{ \item Function \code{VaR()} with a method for objects of class \code{"aggregateDist"} to compute the Value at Risk of a distribution. \item Function \code{CTE()} with a method for objects of class \code{"aggregateDist"} to compute the Conditional Tail Expectation of a distribution. \item Function \code{adjCoef()} to compute the adjustment coefficient in ruin theory. If proportional or excess-of-loss reinsurance is included in the model, \code{adjCoef()} returns a function to compute the adjustment coefficient for given limits. A plot method is also included. \item Function \code{ruin()} returns a function to compute the infinite time probability of ruin for given initial surpluses in the Cramér-Lundberg and Sparre Andersen models. Most calculations are done using the cdf of phase-type distributions as per Asmussen and Rolski (1991). \item Calculations of the aggregate claim distribution using the recursive method much faster now that recursions are done in C. 
} } \subsection{NEW FEATURES -- CREDIBILITY THEORY}{ \itemize{ \item Modular rewrite of \code{cm()}: the function now calls internal functions to carry calculations for each supported credibility model. This is more efficient. \item Basic support for the regression model of Hachemeister in function \code{cm()}. \item For the hierarchical credibility model: support for the variance components estimators of Bühlmann and Gisler (2005) and Ohlsson (2005). Support remains for iterative pseudo-estimators. \item Calculations of iterative pseudo-estimators in hierarchical credibility are much faster now that they are done in C. } } \subsection{OTHER NEW FEATURES}{ \itemize{ \item Four new vignettes: introduction to the package and presentation of the features in loss distributions, risk theory and credibility theory. \item Portfolio simulation material of the \code{credibility} demo moved to demo \code{simulation}. } } \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item Argument \code{approx.lin} of \code{quantile.aggregateDist()} renamed \code{smooth}. \item Function \code{aggregateDist()} gains a \code{maxit} argument for the maximum number of recursions when using Panjer's algorithm. This is to avoid infinite recursion when the cumulative distribution function does not converge to 1. \item Function \code{cm()} gains a \code{maxit} argument for the maximum number of iterations in pseudo-estimators calculations. \item Methods of \code{aggregate()}, \code{frequency()}, \code{severity()} and \code{weights()} for objects of class \code{"simpf"} gain two new arguments: \enumerate{ \item \code{classification}; when \code{TRUE}, the columns giving the classification structure of the portfolio are excluded from the result. This eases calculation of loss ratios (aggregate claim amounts divided by the weights); \item \code{prefix}; specifies a prefix to use in column names, with sensible defaults to avoid name clashes for data and weight columns. 
} } } \subsection{BUG FIXES}{ \itemize{ \item The way weights had to be specified for the \code{"chi-square"} method of \code{mde()} to give expected results was very unintuitive. The fix has no effect when using the default weights. \item The empirical step function returned by the \code{"recursive"} and \code{"convolution"} methods of \code{aggregateDist()} now correctly returns 1 when evaluated past its largest knot. } } \subsection{DEPRECATED}{ \itemize{ \item Direct usage of \code{bstraub()} is now deprecated in favor of \code{cm()}. The function will remain in the package since it is used internally by \code{cm()}, but it will not be exported in future releases of the package. The current format of the results is also deprecated. } } } \section{CHANGES IN VERSION 0.9-3}{ \subsection{DEPRECATED, DEFUNCT OR NO BACKWARD COMPATIBILITY}{ \itemize{ \item The user interface of \code{coverage()} has changed. Instead of taking in argument the name of a probability law (say \code{foo}) and require that functions \code{dfoo()} and \code{pfoo()} exist, \code{coverage()} now requires a function name or function object to compute the cdf of the unmodified random variable and a function name or function object to compute the pdf. If both functions are provided, \code{coverage()} returns a function to compute the pdf of the modified random variable; if only the cdf is provided, \code{coverage()} returns the cdf of the modified random variable. Hence, argument \code{cdf} is no longer a boolean. The new interface is more in line with other functions of the package. } } \subsection{BUG FIXES}{ \itemize{ \item Methods of \code{summary()} and \code{print.summary()} for objects of class \code{"cm"} were not declared in the NAMESPACE file. \item Various fixes to the demo files. } } } \section{CHANGES IN VERSION 0.9-2}{ \itemize{ Major official update. This version is not backward compatible with the 0.1-x series. 
Features of the package can be split in the following categories: loss distributions modeling, risk theory, credibility theory. } \subsection{NEW FEATURES -- LOSS DISTRIBUTIONS}{ \itemize{ \item Functions \code{[dpqr]foo()} to compute the density function, cumulative distribution function, quantile function of, and to generate variates from, all probability distributions of Appendix A of Klugman et al. (2004), \emph{Loss Models, Second Edition} (except the inverse gaussian and log-t) not already in R. Namely, this adds the following distributions (the root is what follows the \code{d}, \code{p}, \code{q} or \code{r} in function names): \tabular{ll}{ DISTRIBUTION NAME \tab ROOT \cr Burr \tab \code{burr} \cr Generalized beta \tab \code{genbeta} \cr Generalized Pareto \tab \code{genpareto} \cr Inverse Burr \tab \code{invburr} \cr Inverse exponential \tab \code{invexp} \cr Inverse gamma \tab \code{invgamma} \cr Inverse Pareto \tab \code{invpareto} \cr Inverse paralogistic \tab \code{invparalogis} \cr Inverse transformed gamma \tab \code{invtrgamma} \cr Inverse Weibull \tab \code{invweibull} \cr Loggamma \tab \code{loggamma} \cr Loglogistic \tab \code{llogis} \cr Paralogistic \tab \code{paralogis} \cr Pareto \tab \code{pareto} \cr Single parameter Pareto \tab \code{pareto1} \cr Transformed beta \tab \code{trbeta} \cr Transformed gamma \tab \code{trgamma} } All functions are coded in C for efficiency purposes and should behave exactly like the functions in base R. For all distributions that have a scale parameter, the corresponding functions have \code{rate = 1} and \code{scale = 1/rate} arguments. \item Functions \code{foo()} to compute the \eqn{k}-th raw (non-central) moment and \eqn{k}-th limited moment for all the probability distributions mentioned above, plus the following ones of base R: beta, exponential, gamma, lognormal and Weibull. \item Facilities to store and manipulate grouped data (stored in an interval-frequency fashion). 
Function \code{grouped.data()} creates a grouped data object similar to a data frame. Methods of \code{"["}, \code{"[<-"}, \code{mean()} and \code{hist()} created for objects of class \code{"grouped.data"}. \item Function \code{ogive()} --- with appropriate methods of \code{knots()}, \code{plot()}, \code{print()} and \code{summary()} --- to compute the ogive of grouped data. Usage is in every respect similar to \code{stats:::ecdf()}. \item Function \code{elev()} to compute the empirical limited expected value of a sample of individual or grouped data. \item Function emm() to compute the k-th empirical raw (non-central) moment of a sample of individual or grouped data. \item Function \code{mde()} to compute minimum distance estimators from a sample of individual or grouped data using one of three distance measures: Cramer-von Mises (CvM), chi-square, layer average severity (LAS). Usage is similar to \code{fitdistr()} of package \pkg{MASS}. \item Function \code{coverage()} to obtain the pdf or cdf of the payment per payment or payment per loss random variable under any combination of the following coverage modifications: ordinary of franchise deductible, policy limit, coinsurance, inflation. The result is a function that can be used in fitting models to data subject to such coverage modifications. \item Individual dental claims data set \code{dental} and grouped dental claims data set \code{gdental} of Klugman et al. (2004), \emph{Loss Models, Second Edition}. } } \subsection{NEW FEATURES -- RISK THEORY}{ \itemize{ \item Function \code{aggregateDist()} returns a function to compute the cumulative distribution function of the total amount of claims random variable for an insurance portfolio using any of the following five methods: \enumerate{ \item exact calculation by convolutions (using function convolve() of package \pkg{stats}; \item recursive calculation using Panjer's algorithm; \item normal approximation; \item normal power approximation; \item simulation. 
} The modular conception of \code{aggregateDist()} allows for easy inclusion of additional methods. There are special methods of \code{print()}, \code{summary()}, \code{quantile()} and \code{mean()} for objects of class \code{"aggregateDist"}. The objects otherwise inherit from classes \code{"ecdf"} (for methods 1, 2 and 3) and \code{"function"}. See also the DEPRECATED, DEFUNCT OR NO BACKWARD COMPATIBILITY section below. \item Function \code{discretize()} to discretize a continuous distribution using any of the following four methods: \enumerate{ \item upper discretization, where the discretized cdf is always above the true cdf; \item lower discretization, where the discretized cdf is always under the true cdf; \item rounding, where the true cdf passes through the midpoints of the intervals of the discretized cdf; \item first moment matching of the discretized and true distributions. } Usage is similar to \code{curve()} of package \pkg{graphics}. Again, the modular conception allows for easy inclusion of additional discretization methods. } } \subsection{NEW FEATURES -- CREDIBILITY THEORY}{ \itemize{ \item Function \code{simpf()} can now simulate data for hierarchical portfolios of any number of levels. Model specification changed completely; see the DEPRECATED, DEFUNCT OR NO BACKWARD COMPATIBILITY below. The function is also significantly (\eqn{\sim 10\times}{~10x}) faster than the previous version. \item Generic function \code{severity()} defined mostly to provide a method for objects of class \code{"simpf"}; see below. 
\item Methods of \code{aggregate()}, \code{frequency()}, \code{severity()} and \code{weights()} to extract information from objects of class \code{"simpf"}: \enumerate{ \item \code{aggregate()} returns the matrix of aggregate claim amounts per node; \item \code{frequency()} returns the matrix of the number of claims per node; \item \code{severity()} returns the matrix of individual claim amounts per node; \item \code{weights()} returns the matrix of weights corresponding to the data. } Summaries can be done in various ways; see \code{?simpf.summaries} \item Function \code{cm()} (for \emph{c}redibility \emph{m}odel) to compute structure parameters estimators for hierarchical credibility models, including the Bühlmann and Bühlmann-Straub models. Usage is similar to \code{lm()} of package \pkg{stats} in that the hierarchical structure is specified by means of a formula object and data is extracted from a matrix or data frame. There are special methods of \code{print()}, \code{summary()} for objects of class \code{"cm"}. Credibility premiums are computed using a method of \code{predict()}; see below. For simple Bühlmann and Bühlmann-Straub models, \code{bstraub()} remains simpler to use and faster. \item Function \code{bstraub()} now returns an object of class \code{"bstraub"} for which there exist print and summary methods. The function no longer computes the credibility premiums; see the DEPRECATED, DEFUNCT OR NO BACKWARD COMPATIBILITY section below. \item Methods of \code{predict()} for objects of class \code{"cm"} and \code{"bstraub"} created to actually compute the credibility premiums of credibility models. Function \code{predict.cm()} can return the premiums for specific levels of a hierarchical portfolio only. } } \subsection{OTHER NEW FEATURES}{ \itemize{ \item Function \code{unroll()} to unlist a list with a \code{"dim"} attribute of length 0, 1 or 2 (that is, a vector or matrix of vectors) according to a specific dimension. 
Currently identical to \code{severity.default()} by lack of a better usage of the default method of \code{severity()}. \item Three new demos corresponding to the three main fields of actuarial science covered by the package. \item French translations of the error and warning messages. \item The package now has a name space. } } \subsection{DEPRECATED, DEFUNCT OR NO BACKWARD COMPATIBILITY}{ \itemize{ \item Function \code{panjer()}, although still present in the package, should no longer be used directly. Recursive calculation of the aggregate claim amount should be done with \code{aggregateDist()}. Further, the function is not backward compatible: model specification has changed, discretization of the claim amount distribution should now be done with \code{discretize()}, and the function now returns a function to compute the cdf instead of a simple vector of probabilities. \item Model specification for \code{simpf()} changed completely and is not backward compatible with previous versions of the package. The new scheme allows for much more general models. \item Function \code{rearrangepf()} is defunct and has been replaced by methods of \code{aggregate()}, \code{frequency()} and \code{severity()}. \item Function \code{bstraub()} no longer computes the credibility premiums. One should now instead use \code{predict()} for this. \item The data set \code{hachemeister} is no longer a list but rather a matrix with a state specification. } } } \section{CHANGES IN VERSION 0.1-3}{ \itemize{ \item Fixed the dependency on R >= 2.1.0 since the package uses function \code{isTRUE()}. } } \section{CHANGES IN VERSION 0.1-2}{ \itemize{ \item First public release. \item Fixed an important bug in \code{bstraub()}: when calculating the range of the weights matrix, \code{NA}s were not excluded. \item Miscellaneous documentation corrections. } } \section{CHANGES IN VERSION 0.1-1}{ \itemize{ \item Initial release. 
\item Contains functions \code{bstraub()}, \code{simpf()}, \code{rearrangepf()} and \code{panjer()}, and the dataset \code{hachemeister}. } } actuar/inst/doc/0000755000176200001440000000000015151412457013260 5ustar liggesusersactuar/inst/doc/actuar.R0000644000176200001440000000072415151412373014662 0ustar liggesusers### R code from vignette source 'actuar.Rnw' ################################################### ### code chunk number 1: actuar.Rnw:48-50 (eval = FALSE) ################################################### ## vignette(package = "actuar") ## demo(package = "actuar") ################################################### ### code chunk number 2: actuar.Rnw:59-61 (eval = FALSE) ################################################### ## citation() ## citation("actuar") actuar/inst/doc/coverage.R0000644000176200001440000001212715151412373015176 0ustar liggesusers### R code from vignette source 'coverage.Rnw' ################################################### ### code chunk number 1: coverage.Rnw:11-12 ################################################### library(actuar) ################################################### ### code chunk number 2: coverage.Rnw:57-59 ################################################### deductible <- 5 limit <- 13 ################################################### ### code chunk number 3: coverage.Rnw:64-75 ################################################### pgammaL <- coverage(cdf = pgamma, deductible = deductible, limit = limit, per.loss = TRUE) dgammaL <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, per.loss = TRUE) pgammaP <- coverage(cdf = pgamma, deductible = deductible, limit = limit) dgammaP <- coverage(dgamma, pgamma, deductible = deductible, limit = limit) d <- deductible u <- limit - d e <- 0.001 ylim <- c(0, dgammaL(0, 5, 0.6)) ################################################### ### code chunk number 4: coverage.Rnw:100-106 ################################################### par(mar = c(2, 3, 1, 1)) 
curve(pgammaP(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaP(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, u), labels = c("0", "u - d")) ################################################### ### code chunk number 5: coverage.Rnw:123-129 ################################################### par(mar = c(2, 3, 1, 1)) curve(dgammaP(x, 5, 0.6), from = 0 + e, to = u - e, xlim = c(0, limit), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) points(u, dgammaP(u, 5, 0.6), pch = 16) axis(1, at = c(0, u), labels = c("0", "u - d")) ################################################### ### code chunk number 6: coverage.Rnw:159-165 ################################################### par(mar = c(2, 3, 1, 1)) curve(pgammaL(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaL(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, u), labels = c("0", "u - d")) ################################################### ### code chunk number 7: coverage.Rnw:180-186 ################################################### par(mar = c(2, 3, 1, 1)) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = u - e, xlim = c(0, limit), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) points(c(0, u), dgammaL(c(0, u), 5, 0.6), pch = 16) axis(1, at = c(0, u), labels = c("0", "u - d")) ################################################### ### code chunk number 8: coverage.Rnw:194-207 ################################################### pgammaL <- coverage(cdf = pgamma, deductible = deductible, limit = limit, per.loss = TRUE, franchise = TRUE) dgammaL <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, per.loss = TRUE, franchise = TRUE) pgammaP <- coverage(cdf = pgamma, deductible = deductible, limit = limit, franchise = TRUE) dgammaP <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, franchise = TRUE) d <- 
deductible u <- limit e <- 0.001 ylim <- c(0, dgammaL(0, 5, 0.6)) ################################################### ### code chunk number 9: coverage.Rnw:232-238 ################################################### par(mar = c(2, 3, 1, 1)) curve(pgammaP(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit + d), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaP(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) ################################################### ### code chunk number 10: coverage.Rnw:255-262 ################################################### par(mar = c(2, 3, 1, 1)) curve(dgammaP(x, 5, 0.6), from = d + e, to = u - e, xlim = c(0, limit + d), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = d, add = TRUE, lwd = 2) points(u, dgammaP(u, 5, 0.6), pch = 16) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) ################################################### ### code chunk number 11: coverage.Rnw:292-298 ################################################### par(mar = c(2, 3, 1, 1)) curve(pgammaL(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit + d), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaL(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) ################################################### ### code chunk number 12: coverage.Rnw:312-319 ################################################### par(mar = c(2, 3, 1, 1)) curve(dgammaL(x, 5, 0.6), from = d + e, to = u - e, xlim = c(0, limit + d), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = d, add = TRUE, lwd = 2) points(c(0, u), dgammaL(c(0, u), 5, 0.6), pch = 16) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) actuar/inst/doc/risk.R0000644000176200001440000002233115151412373014351 0ustar liggesusers### R code from vignette source 'risk.Rnw' 
###################################################
### code chunk number 1: risk.Rnw:17-19
###################################################
## Setup: load the package and narrow printed output for the vignette.
library(actuar)
options(width = 52, digits = 4)


###################################################
### code chunk number 2: risk.Rnw:159-185
###################################################
## Discretize a lognormal cdf on [0, 5] with each of the four
## supported methods, then draw each step cdf over the true cdf
## in a 2 x 2 panel layout.
fu <- discretize(plnorm(x), method = "upper",
                 from = 0, to = 5)
fl <- discretize(plnorm(x), method = "lower",
                 from = 0, to = 5)
fr <- discretize(plnorm(x), method = "rounding",
                 from = 0, to = 5)
## The "unbiased" (moment matching) method additionally needs the
## limited expected value function, here levlnorm.
fb <- discretize(plnorm(x), method = "unbiased",
                 from = 0, to = 5, lev = levlnorm(x))
par(mfrow = c(2, 2), mar = c(5, 2, 4, 2))
## diffinv() cumulates the discretized probabilities back into a cdf
## for plotting as a step function.
curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Upper", ylab = "F(x)")
plot(stepfun(0:4, diffinv(fu)), pch = 20, add = TRUE)
curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Lower", ylab = "F(x)")
plot(stepfun(0:5, diffinv(fl)), pch = 20, add = TRUE)
curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Rounding", ylab = "F(x)")
plot(stepfun(0:4, diffinv(fr)), pch = 20, add = TRUE)
curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Unbiased", ylab = "F(x)")
plot(stepfun(0:5, diffinv(fb)), pch = 20, add = TRUE)
## Alternative single-panel overlay version, kept disabled for reference.
## curve(plnorm(x), from = 0, to = 5, lwd = 2, ylab = "F(x)")
## par(col = "blue")
## plot(stepfun(0:4, diffinv(fu)), pch = 19, add = TRUE)
## par(col = "red")
## plot(stepfun(0:5, diffinv(fl)), pch = 19, add = TRUE)
## par(col = "green")
## plot(stepfun(0:4, diffinv(fr)), pch = 19, add = TRUE)
## par(col = "magenta")
## plot(stepfun(0:5, diffinv(fb)), pch = 19, add = TRUE)
## legend(3, 0.3, legend = c("upper", "lower", "rounding", "unbiased"),
##        col = c("blue", "red", "green", "magenta"), lty = 1, pch = 19,
##        text.col = "black")


###################################################
### code chunk number 3: risk.Rnw:199-204 (eval = FALSE)
###################################################
## Display-only examples (eval = FALSE in the vignette source).
## fx <- discretize(pgamma(x, 2, 1), method = "upper",
##                  from = 0, to = 17, step = 0.5)
## fx <- discretize(pgamma(x, 2, 1), method = "unbiased",
##                  lev = levgamma(x, 2, 1),
##                  from = 0, to = 17, step = 0.5)


###################################################
### code chunk number 4: risk.Rnw:324-331
###################################################
## Aggregate claim amount distribution by the recursive method:
## Poisson(10) frequency, discretized Gamma(2, 1) severity.
fx <- discretize(pgamma(x, 2, 1), method = "unbiased",
                 from = 0, to = 22, step = 0.5,
                 lev = levgamma(x, 2, 1))
Fs <- aggregateDist("recursive", model.freq = "poisson",
                    model.sev = fx, lambda = 10, x.scale = 0.5)
summary(Fs)


###################################################
### code chunk number 5: risk.Rnw:335-339
###################################################
## Same model with lambda halved and one convolution of the result.
Fsc <- aggregateDist("recursive", model.freq = "poisson",
                     model.sev = fx, lambda = 5, convolve = 1,
                     x.scale = 0.5)
summary(Fsc)


###################################################
### code chunk number 6: risk.Rnw:343-344
###################################################
## Support points (knots) of the aggregate distribution.
knots(Fs)


###################################################
### code chunk number 7: risk.Rnw:348-350 (eval = FALSE)
###################################################
## Display-only version of the plot in the next chunk.
## plot(Fs, do.points = FALSE, verticals = TRUE,
##      xlim = c(0, 60))


###################################################
### code chunk number 8: risk.Rnw:354-355
###################################################
plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60))


###################################################
### code chunk number 9: risk.Rnw:366-369
###################################################
## Summary statistics of the aggregate distribution.
mean(Fs)
quantile(Fs)
quantile(Fs, 0.999)


###################################################
### code chunk number 10: risk.Rnw:374-375
###################################################
## Probability mass at each knot of the aggregate distribution.
diff(Fs)


###################################################
### code chunk number 11: risk.Rnw:397-399
###################################################
## Risk measures computed from the aggregate distribution.
VaR(Fs)
CTE(Fs)


###################################################
### code chunk number 12: risk.Rnw:407-433
###################################################
## Sensitivity of the recursive method to the discretization:
## recompute the aggregate distribution with upper and lower
## discretizations of the same Gamma(2, 1) severity, for comparison
## with Fs (unbiased discretization) from the previous chunks.
fx.u <- discretize(pgamma(x, 2, 1), from = 0, to = 22,
                   step = 0.5, method = "upper")
Fs.u <- aggregateDist("recursive", model.freq = "poisson",
                      model.sev = fx.u, lambda = 10, x.scale = 0.5)
fx.l <- discretize(pgamma(x, 2, 1), from = 0, to = 22,
                   step = 0.5, method = "lower")
Fs.l <- aggregateDist("recursive", model.freq = "poisson",
                      model.sev = fx.l, lambda = 10, x.scale = 0.5)
## Two further estimates: a normal approximation (moments = c(20, 60))
## and a simulation with 10000 draws from the same frequency/severity
## model.
Fs.n <- aggregateDist("normal", moments = c(20, 60))
Fs.s <- aggregateDist("simulation",
                      model.freq = expression(y = rpois(10)),
                      model.sev = expression(y = rgamma(2, 1)),
                      nb.simul = 10000)
## Overlay the five cdf estimates, one color per method.
par(col = "black")
plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60), sub = "")
par(col = "blue")
plot(Fs.u, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "")
par(col = "red")
plot(Fs.l, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "")
par(col = "green")
plot(Fs.s, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "")
par(col = "magenta")
plot(Fs.n, add = TRUE, sub = "")
legend(30, 0.4, c("recursive + unbiased", "recursive + upper",
                  "recursive + lower", "simulation",
                  "normal approximation"),
       col = c("black", "blue", "red", "green", "magenta"),
       lty = 1, text.col = "black")


###################################################
### code chunk number 13: risk.Rnw:525-527
###################################################
## Adjustment coefficient for exponential claims with mgf mgfexp(x)
## and exponential(2) interarrival times, premium rate 2.4.
adjCoef(mgf.claim = mgfexp(x), mgf.wait = mgfexp(x, 2),
        premium.rate = 2.4, upper = 1)


###################################################
### code chunk number 14: risk.Rnw:557-564
###################################################
## Adjustment coefficient under proportional reinsurance
## (reins = "prop") as a function of the retention level on [0, 1],
## with premium function p.
mgfx <- function(x, y) mgfexp(x * y)
p <- function(x) 2.6 * x - 0.2
rho <- adjCoef(mgfx, mgfexp(x, 2), premium = p, upper = 1,
               reins = "prop", from = 0, to = 1)
rho(c(0.75, 0.8, 0.9, 1))
plot(rho)


###################################################
### code chunk number 15: risk.Rnw:569-570
###################################################
plot(rho)
################################################### ### code chunk number 16: risk.Rnw:670-674 ################################################### psi <- ruin(claims = "e", par.claims = list(rate = 5), wait = "e", par.wait = list(rate = 3)) psi psi(0:10) ################################################### ### code chunk number 17: risk.Rnw:679-680 ################################################### op <- options(width=50) ################################################### ### code chunk number 18: risk.Rnw:682-686 ################################################### ruin(claims = "e", par.claims = list(rate = c(3, 7), weights = 0.5), wait = "e", par.wait = list(rate = 3)) ################################################### ### code chunk number 19: risk.Rnw:692-698 ################################################### prob <- c(0.5614, 0.4386) rates <- matrix(c(-8.64, 0.101, 1.997, -1.095), 2, 2) ruin(claims = "p", par.claims = list(prob = prob, rates = rates), wait = "e", par.wait = list(rate = c(5, 1), weights = c(0.4, 0.6))) ################################################### ### code chunk number 20: risk.Rnw:704-710 ################################################### psi <- ruin(claims = "p", par.claims = list(prob = prob, rates = rates), wait = "e", par.wait = list(rate = c(5, 1), weights = c(0.4, 0.6))) plot(psi, from = 0, to = 50) ################################################### ### code chunk number 21: risk.Rnw:712-713 ################################################### options(op) ################################################### ### code chunk number 22: risk.Rnw:718-719 ################################################### plot(psi, from = 0, to = 50) ################################################### ### code chunk number 23: risk.Rnw:788-798 ################################################### f.L <- discretize(ppareto(x, 4, 4), from = 0, to = 200, step = 1, method = "lower") f.U <- discretize(ppareto(x, 4, 4), from = 0, to = 200, step = 1, method = 
"upper") F.L <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = f.L, prob = 1/6) F.U <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = f.U, prob = 1/6) ################################################### ### code chunk number 24: risk.Rnw:804-810 ################################################### psi.L <- function(u) 1 - F.U(u) psi.U <- function(u) 1 - F.L(u) u <- seq(0, 50, by = 5) cbind(lower = psi.L(u), upper = psi.U(u)) curve(psi.L, from = 0, to = 100, col = "blue") curve(psi.U, add = TRUE, col = "green") ################################################### ### code chunk number 25: risk.Rnw:815-817 ################################################### curve(psi.L, from = 0, to = 100, col = "blue") curve(psi.U, add = TRUE, col = "green") actuar/inst/doc/credibility.Rnw0000644000176200001440000006416115147745722016274 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Credibility theory} %\VignettePackage{actuar} %\SweaveUTF8 \title{Credibility theory features of \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Xavier Milhaud \\ Université Claude Bernard Lyon 1 \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} <>= library(actuar) options(width = 57, digits = 4, deparse.cutoff = 30L) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} Credibility models are actuarial tools to distribute premiums fairly among a heterogeneous group of policyholders (henceforth called \emph{entities}). More generally, they can be seen as prediction methods applicable in any setting where repeated measures are made for subjects with different risk levels. 
The credibility theory features of \pkg{actuar} consist of matrix \code{hachemeister} containing the famous data set of \cite{Hachemeister_75} and function \code{cm} to fit hierarchical (including Bühlmann, Bühlmann-Straub), regression and linear Bayes credibility models. Furthermore, function \code{rcomphierarc} can simulate portfolios of data satisfying the assumptions of the aforementioned credibility models; see the \code{"simulation"} vignette for details. \section{Hachemeister data set} \label{sec:hachemeister} The data set of \cite{Hachemeister_75} consists of private passenger bodily injury insurance average claim amounts, and the corresponding number of claims, for five U.S.\ states over 12 quarters between July 1970 and June 1973. The data set is included in the package in the form of a matrix with 5 rows and 25 columns. The first column contains a state index, columns 2--13 contain the claim averages and columns 14--25 contain the claim numbers: <>= data(hachemeister) hachemeister @ \section{Hierarchical credibility model} \label{sec:hierarchical} The linear model fitting function of R is \code{lm}. Since credibility models are very close in many respects to linear models, and since the credibility model fitting function of \pkg{actuar} borrows much of its interface from \code{lm}, we named the credibility function \code{cm}. Function \code{cm} acts as a unified interface for all credibility models supported by the package. Currently, these are: the unidimensional models of \cite{Buhlmann_69} and \cite{BS_70}; the hierarchical model of \cite{Jewell_75} (of which the first two are special cases); the regression model of \cite{Hachemeister_75}, optionally with the intercept at the barycenter of time \citep[Section~8.4]{Buhlmann_Gisler}; linear Bayes models. The modular design of \code{cm} makes it easy to add new models if desired. This section concentrates on usage of \code{cm} for hierarchical models. 
There are some variations in the formulas of the hierarchical model in the literature. We compute the credibility premiums as given in \cite{BJ_87} or \cite{Buhlmann_Gisler}, supporting three types of estimators of the between variance structure parameters: the unbiased estimators of \cite{Buhlmann_Gisler} (the default), the slightly different version of \cite{Ohlsson} and the iterative pseudo-estimators as found in \cite{LivreVert} or \cite{Goulet_JAP}. Consider an insurance portfolio where \emph{entities} are classified into \emph{cohorts}. In our terminology, this is a two-level hierarchical classification structure. The observations are claim amounts $S_{ijt}$, where index $i = 1, \dots, I$ identifies the cohort, index $j = 1, \dots, J_i$ identifies the entity within the cohort and index $t = 1, \dots, n_{ij}$ identifies the period (usually a year). To each data point corresponds a weight --- or volume --- $w_{ijt}$. Then, the best linear prediction for the next period outcome of a entity based on ratios $X_{ijt} = S_{ijt}/w_{ijt}$ is \begin{equation} \label{eq:hierarchical:premiums} \begin{split} \hat{\pi}_{ij} &= z_{ij} X_{ijw} + (1 - z_{ij}) \hat{\pi}_i \\ \hat{\pi}_i &= z_i X_{izw} + (1 - z_i) m, \end{split} \end{equation} with the credibility factors \begin{align*} z_{ij} &= \frac{w_{ij\pt}}{w_{ij\pt} + s^2/a}, & w_{ij\pt} &= \sum_{t = 1}^{n_{ij}} w_{ijt} \\ z_{i} &= \frac{z_{i\pt}}{z_{i\pt} + a/b}, & z_{i\pt} &= \sum_{j = 1}^{J_i} z_{ij} \end{align*} and the weighted averages \begin{align*} X_{ijw} &= \sum_{t = 1}^{n_{ij}} \frac{w_{ijt}}{w_{ij\pt}}\, X_{ijt} \\ X_{izw} &= \sum_{j = 1}^{J_i} \frac{z_{ij}}{z_{i\pt}}\, X_{ijw}. \end{align*} The estimator of $s^2$ is \begin{equation} \label{eq:s2} \hat{s}^2 = \frac{1}{\sum_{i = 1}^I \sum_{j = 1}^{J_i} (n_{ij} - 1)} \sum_{i = 1}^I \sum_{j = 1}^{J_i} \sum_{t = 1}^{n_{ij}} w_{ijt} (X_{ijt} - X_{ijw})^2. \end{equation} The three types of estimators for the variance components $a$ and $b$ are the following. 
First, let \begin{align*} A_i &= \sum_{j = 1}^{J_i} w_{ij\pt} (X_{ijw} - X_{iww})^2 - (J_i - 1) s^2 & c_i &= w_{i\pt\pt} - \sum_{j = 1}^{J_i} \frac{w_{ij\pt}^2}{w_{i\pt\pt}} \\ B &= \sum_{i = 1}^I z_{i\pt} (X_{izw} - \bar{X}_{zzw})^2 - (I - 1) a & d &= z_{\pt\pt} - \sum_{i = 1}^I \frac{z_{i\pt}^2}{z_{\pt\pt}}, \end{align*} with \begin{equation} \label{eq:Xbzzw} \bar{X}_{zzw} = \sum_{i = 1}^I \frac{z_{i\pt}}{z_{\pt\pt}}\, X_{izw}. \end{equation} (Hence, $\E{A_i} = c_i a$ and $\E{B} = d b$.) Then, the Bühlmann--Gisler estimators are \begin{align} \label{eq:ac-BG} \hat{a} &= \frac{1}{I} \sum_{i = 1}^I \max \left( \frac{A_i}{c_i}, 0 \right) \\ \label{eq:bc-BG} \hat{b} &= \max \left( \frac{B}{d}, 0 \right), \end{align} the Ohlsson estimators are \begin{align} \label{eq:ac-Ohl} \hat{a}^\prime &= \frac{\sum_{i = 1}^I A_i}{\sum_{i = 1}^I c_i} \\ \label{eq:bc-Ohl} \hat{b}^\prime &= \frac{B}{d} \end{align} and the iterative (pseudo-)estimators are \begin{align} \label{eq:at} \tilde{a} &= \frac{1}{\sum_{i = 1}^I (J_i - 1)} \sum_{i = 1}^I \sum_{j = 1}^{J_i} z_{ij} (X_{ijw} - X_{izw})^2 \\ \label{eq:bt} \tilde{b} &= \frac{1}{I - 1} \sum_{i = 1}^I z_i (X_{izw} - X_{zzw})^2, \end{align} where \begin{equation} \label{eq:Xzzw} X_{zzw} = \sum_{i = 1}^I \frac{z_i}{z_\pt}\, X_{izw}. \end{equation} Note the difference between the two weighted averages \eqref{eq:Xbzzw} and \eqref{eq:Xzzw}. See \cite{cm} for further discussion on this topic. Finally, the estimator of the collective mean $m$ is $\hat{m} = X_{zzw}$. The credibility modeling function \code{cm} assumes that data is available in the format most practical applications would use, namely a rectangular array (matrix or data frame) with entity observations in the rows and with one or more classification index columns (numeric or character). One will recognize the output format of \code{rcomphierarc} and its summary methods. Then, function \code{cm} works much the same as \code{lm}. 
It takes the following arguments: a formula
In this case, the Bühlmann--Gisler and Ohlsson estimators of the between variance parameters are both identical to the usual \cite{BS_70} estimator \begin{equation} \label{eq:a-hat} \hat{a} = \frac{w_{\pt\pt}}{w_{\pt\pt}^2 - \sum_{i=1}^I w_{i\pt}^2} \left( \sum_{i=1}^I w_{i\pt} (X_{iw} - X_{ww})^2 - (I - 1) \hat{s}^2 \right), \end{equation} and the iterative estimator \begin{equation} \label{eq:a-tilde} \tilde{a} = \frac{1}{I - 1} \sum_{i = 1}^I z_i (X_{iw} - X_{zw})^2 \end{equation} is better known as the Bichsel--Straub estimator. To fit the Bühlmann model using \code{cm}, one simply does not specify any weights. <>= cm(~state, hachemeister, ratios = ratio.1:ratio.12) @ When weights are specified together with a one-level model, \code{cm} automatically fits the Bühlmann--Straub model to the data. In the example below, we use the Bichsel--Straub estimator for the between variance. <>= cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = weight.1:weight.12) @ \section{Regression model of Hachemeister} \label{sec:regression} The credibility regression model of \cite{Hachemeister_75} is a generalization of the Bühlmann--Straub model. If data shows a systematic trend, the latter model will typically under- or over-estimate the true premium of an entity. The idea of \citeauthor{Hachemeister_75} was to fit to the data a regression model where the parameters are a credibility weighted average of an entity's regression parameters and the group's parameters. In order to use \code{cm} to fit a credibility regression model to a data set, one simply has to supply as additional arguments \code{regformula} and \code{regdata}. The first one is a formula of the form \code{\~{} terms} describing the regression model, and the second is a data frame of regressors. 
That is, arguments \code{regformula} and \code{regdata} are in every respect equivalent to arguments \code{formula} and \code{data} of \code{lm}, with the minor difference that \code{regformula} does not need to have a left hand side (and is ignored if present). Below, we fit the model \begin{displaymath} X_{it} = \beta_0 + \beta_1 t + \varepsilon_t, \quad t = 1, \dots, 12 \end{displaymath} to the original data set of \cite{Hachemeister_75}. <>= fit <- cm(~state, hachemeister, regformula = ~ time, regdata = data.frame(time = 1:12), ratios = ratio.1:ratio.12, weights = weight.1:weight.12) fit @ To compute the credibility premiums, one has to provide the ``future'' values of the regressors as in \code{predict.lm}. <>= predict(fit, newdata = data.frame(time = 13)) @ It is well known that the basic regression model has a major drawback: there is no guarantee that the credibility regression line will lie between the collective and individual ones. This may lead to grossly inadequate premiums, as Figure~\ref{fig:state4} shows. \begin{figure}[t] \centering <>= plot(NA, xlim = c(1, 13), ylim = c(1000, 2000), xlab = "", ylab = "") x <- cbind(1, 1:12) lines(1:12, x %*% fit$means$portfolio, col = "blue", lwd = 2) lines(1:12, x %*% fit$means$state[, 4], col = "red", lwd = 2, lty = 2) lines(1:12, x %*% coefficients(fit$adj.models[[4]]), col = "darkgreen", lwd = 2, lty = 3) points(13, predict(fit, newdata = data.frame(time = 13))[4], pch = 8, col = "darkgreen") legend("bottomright", legend = c("collective", "individual", "credibility"), col = c("blue", "red", "darkgreen"), lty = 1:3) @ \caption{Collective, individual and credibility regression lines for State 4 of the Hachemeister data set. The point indicates the credibility premium.} \label{fig:state4} \end{figure} The solution proposed by \cite{Buhlmann:regression:1997} is simply to position the intercept not at time origin, but instead at the barycenter of time \citep[see also][Section~8.4]{Buhlmann_Gisler}. 
In mathematical terms, this essentially amounts to using an orthogonal design matrix. By setting the argument \code{adj.intercept} to \code{TRUE} in the call, \code{cm} will automatically fit the credibility regression model with the intercept at the barycenter of time. The resulting regression coefficients have little meaning, but the predictions are sensible. <>= fit2 <- cm(~state, hachemeister, regformula = ~ time, regdata = data.frame(time = 1:12), adj.intercept = TRUE, ratios = ratio.1:ratio.12, weights = weight.1:weight.12) summary(fit2, newdata = data.frame(time = 13)) @ % Figure~\ref{fig:state4:2} shows the beneficient effect of the intercept adjustment on the premium of State~4. \begin{figure}[t] \centering <>= plot(NA, xlim = c(1, 13), ylim = c(1000, 2000), xlab = "", ylab = "") x <- cbind(1, 1:12) R <- fit2$transition lines(1:12, x %*% solve(R, fit2$means$portfolio), col = "blue", lwd = 2) lines(1:12, x %*% solve(R, fit2$means$state[, 4]), col = "red", lwd = 2, lty = 2) lines(1:12, x %*% solve(R, coefficients(fit2$adj.models[[4]])), col = "darkgreen", lwd = 2, lty = 3) points(13, predict(fit2, newdata = data.frame(time = 13))[4], pch = 8, col = "darkgreen") legend("bottomright", legend = c("collective", "individual", "credibility"), col = c("blue", "red", "darkgreen"), lty = 1:3) @ \caption{Collective, individual and credibility regression lines for State 4 of the Hachemeister data set when the intercept is positioned at the barycenter of time. The point indicates the credibility premium.} \label{fig:state4:2} \end{figure} \section{Linear Bayes model} \label{sec:bayes} In the pure bayesian approach to the ratemaking problem, we assume that the observations $X_t$, $t = 1, \dots, n$, of an entity depend on its risk level $\theta$, and that this risk level is a realization of an unobservable random variable $\Theta$. 
The best (in the mean square sense) approximation to the unknown risk premium $\mu(\theta) = \E{X_t|\Theta = \theta}$ based on observations $X_1, \dots, X_n$ is the Bayesian premium \begin{equation*} B_{n + 1} = \E{\mu(\Theta)|X_1, \dots, X_n}. \end{equation*} It is then well known \citep{Buhlmann_Gisler,LossModels4e} that for some combinaisons of distributions, the Bayesian premium is linear and can written as a credibility premium \begin{equation*} B_{n + 1} = z \bar{X} + (1 - z) m, \end{equation*} where $m = \E{\mu(\Theta)}$ and $z = n/(n + K)$ for some constant $K$. The combinations of distributions yielding a linear Bayes premium involve members of the univariate exponential family for the distribution of $X|\Theta = \theta$ and their natural conjugate for the distribution of $\Theta$: \begin{itemize} \item $X|\Theta = \theta \sim \text{Poisson}(\theta)$, $\Theta \sim \text{Gamma}(\alpha, \lambda)$; \item $X|\Theta = \theta \sim \text{Exponential}(\theta)$, $\Theta \sim \text{Gamma}(\alpha, \lambda)$; \item $X|\Theta = \theta \sim \text{Normal}(\theta, \sigma^2_2)$, $\Theta \sim \text{Normal}(\mu, \sigma^2_1)$; \item $X|\Theta = \theta \sim \text{Bernoulli}(\theta)$, $\Theta \sim \text{Beta}(a, b)$; \item $X|\Theta = \theta \sim \text{Geometric}(\theta)$, $\Theta \sim \text{Beta}(a, b)$; \end{itemize} and the convolutions \begin{itemize} \item $X|\Theta = \theta \sim \text{Gamma}(\tau, \theta)$, $\Theta \sim \text{Gamma}(\alpha, \lambda)$; \item $X|\Theta = \theta \sim \text{Binomial}(\nu, \theta)$, $\Theta \sim \text{Beta}(a, b)$; \item $X|\Theta = \theta \sim \text{Negative Binomial}(r, \theta)$ and $\Theta \sim \text{Beta}(a, b)$. \end{itemize} \autoref{sec:formulas} provides the complete formulas for the above combinations of distributions. 
We identify which combination by means of the argument \code{likelihood} that must be one of
\end{equation*} Therefore, the Bayesian premium is \begin{align*} B_{n + 1} &= \E{\mu(\Theta)|X_1, \dots, X_n} \\ &= \E{\Theta|X_1, \dots, X_n} \\ &= \frac{\alpha + \sum_{t = 1}^n X_t}{\lambda + n} \\ &= \frac{n}{n + \lambda}\, \bar{X} + \frac{\lambda}{n + \lambda} \frac{\alpha}{\lambda} \\ &= z \bar{X} + (1 - z) m, \end{align*} with $m = \E{\mu(\Theta)} = \E{\Theta} = \alpha/\lambda$ and \begin{equation*} z = \frac{n}{n + K}, \quad K = \lambda. \end{equation*} One may easily check that if $\alpha = \lambda = 3$ and $X_1 = 5, X_2 = 3, X_3 = 0, X_4 = 1, X_5 = 1$, then $B_6 = 1.625$. We obtain the same result using \code{cm}. <>= x <- c(5, 3, 0, 1, 1) fit <- cm("bayes", x, likelihood = "poisson", shape = 3, rate = 3) fit predict(fit) summary(fit) @ \appendix \section{Linear Bayes formulas} \label{sec:formulas} This appendix provides the main linear Bayes credibility results for combinations of a likelihood function member of the univariate exponential family with its natural conjugate. For each combination, we provide, other than the names of the distributions of $X|\Theta = \theta$ and $\Theta$: \begin{itemize} \item the posterior distribution $\Theta|X_1 = x_1, \dots, X_n = x_n$, always of the same type as the prior, only with updated parameters; \item the risk premium $\mu(\theta) = \E{X|\Theta = \theta}$; \item the collective premium $m = \E{\mu(\Theta)}$; \item the Bayesian premium $B_{n+1} = \E{\mu(\Theta)|X_1, \dots, X_n}$, always equal to the collective premium evaluated at the parameters of the posterior distribution; \item the credibility factor when the Bayesian premium is expressed as a credibility premium. \end{itemize} %% Compact Listes à puce compactes et sans puce, justement. 
\begingroup \setlist[itemize]{label={},leftmargin=0pt,align=left,nosep} \subsection{Bernoulli/beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Bernoulli}(\theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + \sum_{t = 1}^n x_t \\ \tilde{b} &= b + n - \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \theta \end{equation*} \item Collective premium \begin{equation*} m = \frac{a}{a + b} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{a + \sum_{t = 1}^n X_t}{a + b + n} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + a + b} \end{equation*} \end{itemize} \subsection{Binomial/beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Binomial}(\nu, \theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + \sum_{t = 1}^n x_t \\ \tilde{b} &= b + n \nu - \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \nu \theta \end{equation*} \item Collective premium \begin{equation*} m = \frac{\nu a}{a + b} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\nu (a + \sum_{t = 1}^n X_t)}{a + b + n \nu} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + (a + b)/\nu} \end{equation*} \end{itemize} \subsection{Geometric/Beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Geometric}(\theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + n \\ \tilde{b} &= b + \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{1 - \theta}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{b}{a - 1} \end{equation*} \item Bayesian premium 
\begin{equation*} B_{n + 1} = \frac{b + \sum_{t = 1}^n X_t}{a + n - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + a - 1} \end{equation*} \end{itemize} \subsection{Negative binomial/Beta case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Negative binomial}(r, \theta)$ \item $\Theta \sim \text{Beta}(a, b)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Beta}(\tilde{a}, \tilde{b})$ \begin{align*} \tilde{a} &= a + n r \\ \tilde{b} &= b + \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{r (1 - \theta)}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{r b}{a - 1} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{r (b + \sum_{t = 1}^n X_t)}{a + n r - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + (a - 1)/r} \end{equation*} \end{itemize} \subsection{Poisson/Gamma case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Poisson}(\theta)$ \item $\Theta \sim \text{Gamma}(\alpha, \lambda)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Gamma}(\tilde{\alpha}, \tilde{\lambda})$ \begin{align*} \tilde{\alpha} &= \alpha + \sum_{t = 1}^n x_t \\ \tilde{\lambda} &= \lambda + n \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \theta \end{equation*} \item Collective premium \begin{equation*} m = \frac{\alpha}{\lambda} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\alpha + \sum_{t = 1}^n X_t}{\lambda + n} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + \lambda} \end{equation*} \end{itemize} \subsection{Exponential/Gamma case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Exponential}(\theta)$ \item $\Theta \sim \text{Gamma}(\alpha, \lambda)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Gamma}(\tilde{\alpha}, \tilde{\lambda})$ \begin{align*} \tilde{\alpha} &= \alpha + n \\ \tilde{\lambda} &= \lambda + \sum_{t = 1}^n x_t 
\end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{1}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{\lambda}{\alpha - 1} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\lambda + \sum_{t = 1}^n X_t}{\alpha + n - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + \alpha - 1} \end{equation*} \end{itemize} \subsection{Gamma/Gamma case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Gamma}(\tau, \theta)$ \item $\Theta \sim \text{Gamma}(\alpha, \lambda)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Gamma}(\tilde{\alpha}, \tilde{\lambda})$ \begin{align*} \tilde{\alpha} &= \alpha + n \tau \\ \tilde{\lambda} &= \lambda + \sum_{t = 1}^n x_t \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \frac{\tau}{\theta} \end{equation*} \item Collective premium \begin{equation*} m = \frac{\tau \lambda}{\alpha - 1} \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\tau (\lambda + \sum_{t = 1}^n X_t)}{\alpha + n \tau - 1} \end{equation*} \item Credibility factor \begin{equation*} z = \frac{n}{n + (\alpha - 1)/\tau} \end{equation*} \end{itemize} \subsection{Normal/Normal case} \begin{itemize} \item $X|\Theta = \theta \sim \text{Normal}(\theta, \sigma_2^2)$ \item $\Theta \sim \text{Normal}(\mu, \sigma_1^2)$ \item $\Theta|X_1 = x_1, \dots, X_n = x_n \sim \text{Normal}(\tilde{\mu}, \tilde{\sigma}_1^2)$ \begin{align*} \tilde{\mu} &= \frac{\sigma_1^2 \sum_{t = 1}^n x_t + \sigma_2^2 \mu}{n \sigma_1^2 + \sigma_2^2} \\ \tilde{\sigma}_1^2 &= \frac{\sigma_1^2 \sigma_2^2}{n \sigma_1^2 + \sigma_2^2} \end{align*} \item Risk premium \begin{equation*} \mu(\theta) = \theta \end{equation*} \item Collective premium \begin{equation*} m = \mu \end{equation*} \item Bayesian premium \begin{equation*} B_{n + 1} = \frac{\sigma_1^2 \sum_{t = 1}^n X_t + \sigma_2^2 \mu}{n \sigma_1^2 + \sigma_2^2} \end{equation*} \item Credibility factor \begin{equation*} z = 
\frac{n}{n + \sigma_2^2/\sigma_1^2} \end{equation*} \end{itemize} \endgroup \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/inst/doc/distributions.pdf0000644000176200001440000041026615151412431016656 0ustar liggesusers%PDF-1.5 % 10 0 obj <> stream xڵYK6 WD#R/ 0r()[=lɞzTēb7HlI$O &0Oz=}mzy BpvzM1*t@e<]Z zFhwvK*=~BKoOeMY6}l}Q* Ɋf9@iL@Pau֞--h,jv"Hzp+0<T1h2 ~+@oVkgYR\`1'P&+mNζ<0zιT= SBۈK㾃\A?sLp2Ul=h#`'n'wbSq[5YbE(]rbݐgo$hFx`@&^ѱFy?JeP]kAMdMAr#ULnZSX_3*DfG9$S\݃ܢ )&a[z*Y.[[e,K#=mf+E~ɱԬMb&Gj9JkՅ"q;FHy0@et~(e{guD}@c!C ^fuv0Y ۠MD\zV t"?A Kr!:c(A IO\hV9x,sm,/4Ч/ */wmL&PM1Ħ3BW}51̺V3"`tUGXeks=M_efD˫a.gPh"O;'Wm% &I9 tľq8'b[GN 3䝕wٴo[hט,î&*atd.<<^$QŮ_;{`;b.coURΗ5tHa BFbR.~O+4iUe/46̀lK橝cjAV:vH |I3+ jL8HܱTv@37ױ8>^OsPzo QȔ>m+xXdgU%FvaO7ؤ%l.Iz!KR8eݩƅ/YXNf65Ahb%י#oBsH(Taq"wHZ7V5)JHaȤFc5CHQWSZ '/9v{26OrT*i̽]imuL[{dĻ:Ql-O!uqc,Rin(,eQ v7XPh!r>HVnz_{r17>H$> stream xڽ[ˎc +ֈ0  (Ye"D=\z<%K$COo_?8:J[ƒ% 4g~0d^gNHΛ4zC/M5o蜜${^!%EKqF۫f?*1R -O"4YQ3u`!0-U|׸tqZӯc讶)Y= .=6>*WτQޙU]*ΰr,-6ɦV錰amܱ8㢘C)(Zdxȏ K :|-.Q#_(V# XR Y;n(l eZϓ ݅J0 E {0NX DNbFװ6P'|gND}bRfs AXx=ڄ o*ADAHhmbcMNl^^ 0-ylpBpȭ c?:_(~0;;O yh "l#4e ]D)#yt;Co^42B }ٶyyJ:IKAW%Q*Y!)gvH}ʄ>yyF;NO0SOar_ب=bD~?X:B!6VQAG\Ȃ^5[EF%@Y+t-(PQM}Q!?u$0r0:hJ)'Ex.2ƕZ7dW`#kxFmUz讆"tX%SRRpxN J7>e0S,=el`EPS,1\ I~ҟԙBsU5D}>Н%1:gy>ZOKw(3 2>&<¢[Rjs Ӿ>Ӡs^yNcQH}$ ʵ3.pFbW}Ɉ튪%QdFy #pfBp( %ed(3h+jS)K"(]eOXɎ8eZŞnu-opR]wx(ȼ޳]杖j8w!u5`m<'|%h!emQb@KPF:Q ?o|b˝/=+l!]:hK6<+odS Y 2tDT02{ }mi#u6]BlW5VݳXO\fcYξ6ґv"o"\DOM-x2qn{RnZε'`$G]V5eƃj7鍤RW yj~7'2[ (]G#|Z[:Fv@|F-*{q*H2 g5z%}sXAsc>"q/Py|K]o]Q}PZ)vW $}V#u闵yx4@g/J }{k>[-I0eQ-ɡjMBD^RI!\~ͥD +/[ɓy=¥Gk†BHm\^K]u]ݪn֩tR&E1-n}Y%Д88h#`tgnםY Fw5NGt׸\{+0N'4O畗Ɔ .C=VS}s=mAJI_MyܜASrT;p)t#Y-PO2Ԫfg$3B5f W &Njr~!Z˨wsbKj;a\iJ ևOWJ endstream endobj 38 0 obj <> stream xڽXn0 +QI,a:`b ;Mzi?Jl%q;nMpJ#HI*TPA=Wn/>_ ^GmDlڢQ}ުT:BD=tDh^mw?-pgn v7itQp_fLM/څ0,ԣ!4%Mf:r5y )]WQ;3c-b ܏ `;푮'x.q6'3sC|Mδ`@6&Ӂ۰!dQJ[]nC-qm Ux+a $qщFp@-Ysz%k“3L/Px* 8 ,NPKK{Cos+H=ZdG ( u 
4'$ʦZ='/vk`.!}Y)h{iH<rnAXie0~=YA]Ł:vP$,\xjE>M2wɔ2YYj׵Jz1x[js}l9g BXܵ"W> stream xڵ\M$ W9,Rbd/g %?*V{װKH}F7/{?޾.ޒJt Iy}7??%1R<N|o|HPE;_ۯ7eRt݌ 3ieDHnh\3<ŋq*4LY,17핍e+M1 xUt`qTbؓJ\^ko.uhQRXEΟ*a1 ׂ|"M= ny{䥤(n\_^[ գ7s6 'Za(ݥz&nUroݶܟ)*(X9pږh<9OAN)Vճ)̚h;6yYA?hn/#{H|mMh{ŃhNDT c (JSxB^uy ‡nY`O'Ġ e)-֦Zh:uv.G*ErV®4Ɗ|~6oaJ<+Y˧O^u_(bn ae.a~+?L]kY崎'ϯP6ک-y+5z~/O0( zaLLf2ΚVNü8 K@zu&-ü܃XF9L0E {1Ak,Φ h؎}YwGxt$},;EDEX2 Jm -**B˻Lmb  nEpEi&9{\cqU4VTSTeUa~E iV1> ? 뀼g߆n慼 /#xx8WMW]s]7|3!<uuUg]՘'Ȗx庈"3u<`J*!81`* ˘W|ĬG.ONkx k~ND0d3Oxw(T+NL{9Z[Md ^#x?b׮cvK9[DElnKPP3N E7U5t23Gq3:>ռԐ_9}XP"JߌV$\%UЂ9Q1 f,؋!dDXm+kI~@_u&hv)!EtH*Q)%aZ-@KWnaL ö[ k1_VIP?eB?`6cTW4-WhWʹb=`@dĻ*J&{*BBñ K:-e7ܳ^Ҫ̀& 輛~#aL@(j#G:EtmN*g$<$vx7G(NEs/THjhY>ゔlM"@N&RqFVMCemRnguNl.w43Z04*>f/᮰?OFNL·N=gIHYy;O^!UPGfڭKU7+Q|{G$HF f䎛d>=I]w7ၤQ@'cFݻU:Ndr%QY;ڵSwjҎ>!8D5#o$'ì$ue <vJVDSd\ΣpyMke'i;M$Ta  ڝ;--54FӅ&?j41&tTGC3(u3lzXkXA֙Zy±?eTRl+BsLCRhs "D7J4 K粦ڱ3RKl o+8ͫJ/uV}-9 "ȅ(>k_C'˱ &ǂ,]kJ-*ߋ9UCv/FT5bD~hz\F9[ ,S/t{-υ򼦵9_k7Vﵾ|ldxo\ M+#Բ4-32O6mg&56TYvf`Qۍ|i㗝{m r g$Iٓ*JNYcs!cz H3f LBh~x#dmr.k?GC_nsڻʁAJsL'FSnӒU(39"c<& 7m61aH8}m>9㊠MRmw2Ivg(#I>fs3;$aɌ.[UA *EttPT4W֭E&Vv@ 1bP%<` KʌKC25x,%] Ry*q)ˣEsK~R \o2.v $;ehwˁpgYu̹)SH 3\s43 _XX2oM*Ղd!p*vX4N4>$&Yݞ`#f܍Ty^Pn*:_ƅ5`PT/dfձQR$묢HYܤU qƛ`xR4*e'(f%J b>w1kԶxw899UE&WE'.X+ޝ սTw;Q9(K];*#F{wl}I׉֩J^Ouֺh-OUUJKҽF|3F6Bj=kU'ğwk*~ endstream endobj 54 0 obj <> stream xڭZˮ# +z "@2@v.5bHJeo7zj](CJl'|~˷S۷f1l'7C<@J!%( IxHȟ5dq O 4̗yHw[w#cQFH0 OR^h\øN^h(|{6{za?h#f<>yfzcF-C|_/% UJ `#mF&~͛״ 乭uj!tD#$?z'}SVLvH[KɦɢVEd64Ny;'%"l'@Xu%_ȟ}ALͶN,&oE"yde p,@v`NB;pMz[Yݐ-楠r'OM m\AH' Z6n@Ƞ'rɴ-+nfB%"s@{{2&ُivGm%iGoLjuyJsWTΆh'C l 1q)12\B(Z߲SUs㏲9s gejī0#>\%K` _Rŀ\gI!ٻD^'}pL~W""[ih :јx R,5^evm:^* L}ڻpQ#M8E*]AaVKh,t=.14)eQXmߴt":"E᭞-&̫SFA dDϧVgIQ LX \  _^ 8AB6> *jyHɟg*@0q U*%٭A" q"KshA |jSx;=f[llR4Cv??-2Ab;}+yV0UD8Oevs" 9-S`7 p&KʑMtyIf, ,)鏗g7H)l؄OaDܝZZ錍~ߔµݧUkLQ!6nh(5"*__AWEb2;F YEGgz(񷦕g ,1 4Tt3c XbN!fDCB84UĀtobE)9 J䒥FZ Ԥqm#Sz+jm\Z+3Zݭ}N>pi8l"T+hڵ$PEhXp0̓/T>&$X5__T/uh.BYo絈`5礪<2nYa$=+|hT:ɴКBӗ5N%k{yĭ^9ۥrcGNkFP<2Hą3#] }OBKKh5]va1aDpe-l1Q64Q 
Bs,Ge+/i^D7I<QLHl^Ee'mj>OX}OdQ2TcWe F7X~9ؠCr%K;= ?VDo]^ Z285 0bz_9w_n [gr輬a\G qz>qve{{,;B M\O%$u,Kr~K{/׎ f jҊEl&ڶ+擉N8Y>}֫O .O˛vTχCH3Dͻ_^uBPJ哈W8JP;΅쳻AD Fro^ W'm!CEҮ3>tǡ] RVASۙ1X3 2c@Dm>t9QՕ_Ȍ8 *vbz\t 7.g= m6ܥ,iQ*"~qaחQ Ԍ+!`VR ~w}x!UҼg־u AEL:ϮTE# b˞{]+P[fjA|m.EY|"vXslCҩTyVN;/ڡ> stream xZKW*>>.&} r{6ߤDYvXx,K|Ի*jcE,O./m-Ko_ߗ~rN7)U .R~~|R`JSgМo토0IAI(W8 PófOEGX2Q,kLh<7yL3[^QA\{-܌WEE)*~e |ѾB%' +C+7HFhͦf/a #LœFdi-V̿zz.i-3!Pt-q ~4PIβDhty:,Ͽ]|īϫ 5KRH_3SF8IlJiiTf`SZxU7OZȉHAzAX3Q9a1LW7LD#:` CYI#S{8JjFQZ*?XV1-< G' Sy6`‘h .rqf}ngk \#u.29N=M߅?{RE O:[GP=QR Qo8#.ᨇ[D-[dڽ9fMw!!!18U9pKbM`̖֫8`Vyf+[C =E2ь>Ք=K|N3 U Sr%!Sp"-}k*?d3̤l"pHTl:eCUmfe/BgPӔ@";h;:Fs:,dJXFe4(^ Q4zznI+ qH M{(4$ǩʗ"+>gϙ=qG f.R  c#S l Uʧp_>8D_%r6맔Qb]y&Yc~iR% 0Q<G##򯬘lʭÊ Yy}Ť#STLG̶# P1+&] Z°x͉^D[ [uh}n 1j^wAbp%08!+'j'*d}YBI4Cx鋅־ӾXi۾( Ĺ v/w1G$[N_æG (4+hJ`4uu_:V`:Sl|ub "h'a.\d$AimV SmG=u<6@c.e44WRO[E{L@kکxyw7+ɀ:nqo%WˡirbjX]}֔dҦUV &y\*^O >xhh}c[k8(?6zy>mR aͫMj꡷jq/8zTqlwȾ\rXޔy[||oC`nE]LC3Uz_^UfOWȳ&^^!='G yxe'Ԭ07{'N/~ ѱ{2 15K=y*c)wO"zʸ:NX aƤjD@ JX$yӯLcC Yw uwΫL_5sb9]k<ڝh7/6@b2m 90e. I=с FCS{461kX6ge&Ρ#wPŷ?DYPՃ~]89Q}['mLXVILO˱we) 6&n=NsX}:A[Tg$d#6SǦ endstream endobj 69 0 obj <> stream xZˎ+7W"Robhl&d3?*˯n'M*oRY~_`\7~Exn'ò/e0ˋ.?/ߗ-?X`y;/ w@@p闝JN ].H N]$@$.6ژise;HΗ=Wdw2_N:ulb0$Y y>G.}&qL E8Of ƒOl7G,]HI D/%,?<lZm ATO,Xz/8@k,sʦH["n_[ I0Jb/$Jغ%2%o)8yTSmHľm5P8"jlo+X|Ǚ59(X?H4o1i>_)Cvg*BQt^er WK2)$;pG#&&7o^JN].F[_,]0եzC v&' VNlOJ=HV:,8"[qlJoEvCL5\aEvRho3nì{Z\-Jͤ.>T †xĊi ;ȶyV FO( gTh+A  UK !jUhpt-x.<9 9J8o=-am1z`d{'p`!"_6HQ&Q]<O,א:d%߶4(<T:QU%kȨy:o2Wj=2mƸkk+xjZ۱~I9ۦqUIࠟD+ Ӎ֞C (PvDq7t^?UN;6bB\#T:n]G 6V>OyU`,̲X Ə>,F0@!xwh jh?3yC 5G 7WQAMj[B:P^é+/^HZN&EwPfOy?L;-bfHp?E*wl͕ΚXBi›*vEveb#JFf/thЧCJuai&6o&.س;&<{ܐ N4|ӊ=J}IgηsDxFtT){Rl]*PW@0q.2Zp%ħyCNydT0.8zBO,ey!#[u}X"3Xg:XUaםm@IE7Ut>u#:i}JmHLd֢F2.Se]czjPls͋sVv1sQ {o{CN 5F8 =Crҧ,]ZE^loacggwT'^wmmw;W֥TsausvUǮŁΆ*ile ,v?y`! 
endstream endobj 75 0 obj <> stream xZɎ#+} |=7Çu.}=Y\3jhQJ{E,bq+~7翺Epx˷bs/;rv瞟oSjiVnI&i% ~u^e`«(;8B+c46f9WGs~>ⰓA,nQr^1U\.hCM>g."g|p^uU\AOjΫ#yyZ:Io-=Ib:u)ńZvB1Cse$ՁDIq↻}F1f9}%_oVq\%l:Z (kALtEE&;NOz@ɥL;֝&LRC!Qв8oъ2!ّ@5#GY8i`8L'i6.v2L)&qlb[eF,*cNk6t _>m|`k ))ʬ&HP1WhĨY.Q]Tx3D^Saa)SAKsO(91K$2e!;fm(d4ɥm=K~^2xSeʽeJ>cq15;nvQ0[G#E%vχH\Q<{ԱdGMLy^Sɧ{<];qq1a^!#E1O^ eG+o6]=UENTrDdf҄Q{CAr9i{U%$c % .OƘA`4S^f C $̈́Nd{mE]`Hf ۽П`cCMc0j@;EiD} [H달$Ɗw˼ѲrӖ+m e4~aƊ5x|RTH鶠fӁU bを.`pGa;dr4ɲ'inP73!G d6ktA¾+Q\F$P'mI-<ovP;j"b #=f/S;, sbkS ,E_S3%PZ5pMd_KNAI eLQ˥ތXǼreǔR#7~Am}&~&9Su9Y>` Hj/ϬPĨ3HJ:Ž1kkL ޭ`y[:rW;Uð6~-5ݮJCt9G5U_wc_*M)-텛FZ&mas|4 ~GDox=J&TaIA B OoJ k m!F_DLnuAΤce&@Ib"@}xzh/Uq Wp19GRr uh;Y1;әxPP˥-K)9_3 8z3xKg*Kn>o=EI%]kG䶘8QwؿMIdN!1fnYK52ڲ&c޴Jgk ڽۂvqg+{{&2޿=Z3j]g/-L?}y6ޒEq&2{}vF!=ۿZ|ܦPQ߿_O vCWiC Ey$K}SD7Ģ +<[L$к, P\_-,bQ 26n 3UT6Džr6%Ib+Q<$&<&F>K،%5fѴY&\|j[wyf~1D #&G֮_-O|ޖ{pizk*%(+.G/;3Tv3]P3b}gPd5f_u婻_htc?_ J&㺾|Y/OBT{ endstream endobj 94 0 obj <> stream xڽ[ɮ+ +D]H7]'odx_=w'z&^%3o!-LER,BvX*k[=nՇG^Tְ7iBjՙAk7mUm,#AU\YغzLe߸>M{Kb_~goں%FViUN#ԑ=,ᄊHۍj:䉯uwYE# <-6xlo]VE-˝p@K#Ik3E5'~^nŠ'BS*4q{eAl3ž_#kӘ-9L_NSήK5x6l|;McZjOx#[w1j}z:F;m#ȁ葲Ia)`5WDZ~qnn`otg3UHP<` OBVnlQ|M$;딨:-l7b|g%M"uiivƴ$v>&Ugђ6 T^OWn p]@V5 FM;,֬t<7g"ICC~hK@GZF=eσ-T[R:وI࿛cg )i¢=vpESM %W(;jDgpDibssgG F%bGHUNfD^ eïkH;PEWC3}"\ŋ/g]1f(=XddJ^#nTxyN; WU f$Hb"E= DPa_ᔂ],d$P]UDX*ܮƒtsVh$ Buȫh1 7]|]R{Ye^3nu1X3d k8ͥi]E=2~o *?ѿ\a;7ny`Rchl-N홨ηJ[MgSHؐT-RFL_1g#88)PDX_OQT$8b,ηՀ,L55[ NjBNV:+VhUS~> {~I 9u|Y C#AQORT:ԺdR7JES, BcӸK;H//r g%)m(RJξx nJ$:;W->}:!(d~x\-ͶH k3`\gOxҞJR%E0rNa7"np#>ݫ[\M_)2sg9TAc8Q3e{dFgY+^g?ތ2&_Rvޝ^t8}4W; G66 mΓ.x[gdT[=">ȅm؜F؜exc6n{%Z슇hv2 q4lwNwOp3lսxy"Q6aQr΂d\ݭSћ#\qI%ٖa|2p;A vצ,ͯF[܂ِ7VЕV3SȭCKۑIi:Ag-R.ⷱS>=g{b1,4]vrA+ܜKB3Bd{Dώ&v~pO\șN9RA}j}n ;v/=h~j3N^y6Cz\>XfIun?l*ѢT˛&mLQqкA A١uʘ= BXO솁OWC}=?i¢ګH?( endstream endobj 103 0 obj <> stream xZɮ++İ Y\IW-M~?yh _㪻O `vw?_m+l oJaN K},% 2\rB4g^ {57>_}xPӍf%3ν~Xۋ 鑅<y /'r'wTPx<4 pҜ3UQSȪ[l"N&8\LX</?Lr=lEϠ/:/WuϑJހC*Fj?)cjE L5ϲn7X+G5S[SfY>nnOD!qܑV;0O1慎TBt$HAskK/! 
x#ٽb)݅dFvh\=!F׌IB r8_:d9#ʉ˪'ec%"f4 *c6OHrxB%cL-\Jt6mƭJ'@"(ÄE:CA >_B@%B[b(Y 2ӗo凌XN*yDHE ꆝ2g&4UqZ5i!w65&&jh{%ghvhpvX ,;h.ە M5)){0ebϒ1,w}"lß#[0`p_CrmB>HӷP_ l}UQi%wl GB**%'ZĴ-uAZv}Vt6Jb$:V\I["9}Ze;ƪ3*03VPN) SL"HY$2ב) RkՅ$o9ڒEL[*V5$؈:u7Gp%n.+|7Sh R }эʦxhBؼ㙛2'ݼAFGvn p-!)6houRK1$NlS~cb'ŋח{~o$ˋŒڷJ ^QJK1Od)N/&*fdC- F+>;E&PM}eܺgSZAD jѐB`AjHYB9# ǭ=$1<>/xdu>RFCK@Xw]ۄ @ʹ!K)@^J l{/+N뷨Uؑ_+nCӠPFj=p*0 uBڽJOpl:Usƭ%A=AF '!Go> stream xڥZˎ++İ&0E@vN.BH {)M4cۘV7_S~hGW/o3_ci#)oV vrAx~$ $ Yx|/q6'w& _A7Kěwŭɾܝ'G6l/n;E- X[iT`1~""IXer=uo[Qjah?Wץ!4'Nڐ0ZrKIZcʷGqwdvNx;~N,(*No)s#Ok~d6JaǍߢ ^Q$J(wέ|s}8C>N`\If4%O ׺t:Y%ʮR<&EJwSUvEB߅֣axCb y/Vͳ bn/1XSM&Y[! CGň~p])n+\UkT\PY鼈޺m0 EjضY# dJm}@8(#r'v@NFZ6'"E:N;M,j6GxrEQomJ"<Xa\=EQNlJ l0 sOSQ%Rhg͸0laja^X<ֵ-]ԣh'%XrGrڼV(7ҙ%t(dqkt@&t X ?pY;c腜f;?YYZ&2)HUP,i )Q|R'yĊ|dU?צ-Lvu)D3.fjKls%I(Zcx0H&TdJ9RVcAdF:bM֪@W&+`_0(~~!*cvK P+:IŽ e#b 'zlC+Ut괳.ٛ7>*.Llܢ$>vYJ#=)x~tjYOl@F}܄TY/BHA3ݲf,I҆2#/4 (G:{YD;WQR  B:l<+T1yr  5rhlLFFUj zddXRXG%3&Psaʍs[1&/ٔu}` dVu4e?=\Ы{O;qUX㐼~5#ڤ*fc:8= :@>ܮ~Y"cna$.``SG,ˆ8)q}gaZ%IDE-|HgĨ8m;ԍzVr5V]iWnN#20Z鵏JRxAB~UɽN`7W=BEĂ0@z͇k됗++%rY>(ElHl^zPacY N<-wKNfM]52 OnAщ^p cf"|f?%3'b xITF5E% LTر@2OUelH;ޡc7q*'=<4tf'i<*,RYa D!h27sC G^VRM+;{W恐 [͞E w(R HͶu:'F'!%g9Oeטy\w[{ j"! 
Ѡ*6QɰAۯ=zdWyzC.hs9̖|eC{P#T%WCup-]!\_jP)YNv F hSZ,-\xhUׅ2vQM0dњ"!a 9{ ֹs!E|}0od, C6{X3h$Vأ.vTJ:eXTZ˕&b W ~IqSBά^M9 Qk'?IrdSCXCZ<}TSU3-#%) EVS3]-4P9SQN ԀA܁Q 7bW_1\C>V lWA'(DݸN*@>K9Pdw{ӷ3KT_H/~f3d_0 _zD>yyi.a_TĸL@nL$_d/ґ(x%=~^a nd!;_Մ endstream endobj 133 0 obj <> stream xZˎ, +Z0@vNfd1]MCRԣ^=3{SU(y0Et~6x7@3Lo[sN!v׋{=c_ꂿk[K"8`_Pڌ-0u3u*=aVȕhan{`#kG[d 3ڸcTǮ N*GM\Ne Qm3.,rt5 o8oZ7pm_0ړr:G9c+cBp!kOXKϨ"*U E#'Xڄ}?b.|'fҥ1B-> }aLxmCruUohMXV2 L Plm#Cҥ/(XHOG)1upu{&{ƸϸƸ MkA,ak=DM/=5=+} t HIKP,~Z 6/3>qHcmOՕ45rK*HՈPdA@Wu*FƑ۪gƨPZv[!Fҗ~I7`|ť#=,h& :QTs*+@Ʌ1F^fD\b{`EtJn' ``z$pM)eMȸDWa.ٍ^ `QôٓV뒯 6ONlc0n+Wd_&şݢ[@\|i Uw5~~kս0L;{vE:w گQMN媬p[R {uEb>=&# `%3<=LaRrOy݊ ':vtzadٰ[eV }DޢZµCIAi-@ƞxel Ѩ~JRP,tݓ4x,s'2VR_8^mfz07kXT|4N hYU16 7$'C_AC(#~ԅOm*S4"h x79f%8r,<+sT\굧;vh\mLQ@/-s3;I)b ^[y7䃖o4F&VMSqyd\zZRª]^MVÆDe)I@S'zYҍEe)ӹ!?.gEs%6f=t(j*`gz7vH.c>zrf:D2VgZ7 Ӹh\}pmH B> tR (u !JQ7jwY^}P!ϊ"pNO48a#, {Hb!EÆid'.lTuAze@tcY>"}#0e^n٫kY5;$#wwDGWSLdPaS57= )5y&4zJgvO`98[jm6AFߓNs~9x~/xo4z3JNWdpuRF[H2RGYgSHr/ʪZdzTIC˕F$6t.}gJ)Kk˙N4Tn]fW QhϔV٧-<ۅ{l&8jΰv{QomDΫ m> Ťʙct`ϴO%uAi-s:y[=ny Oؔ"͠T t3ҙ-PS)\0JlyTd!) ܭO`u_4s@ *Ukt> z4^o'atr6'ak(E2{/mcЂh HlbgBCc+K͙nQSٮMlԣ6O旼@+VJյ n9X/Y ՀNvaP?q e Hw5H=rF^.v!e0 -93w 9s2`R;mz᥾y|Yl@ɤ8dq w'X|&e۔Rux?he67y"noKj(5c-|]$De! !}%?AᄂJ0)1Kb 6~<3bm(yK vĪt iTZ㧟? endstream endobj 139 0 obj <> stream xZ;$ +,Ro`ᠧopGUKBWD)}|jX4% +=Bo74Wn -={T??_'V1.mw9bq^yoL*:|,;\?}Y7 DX\$S\*i_oZ[[.tうt!2&w)K=X+U/DFQΦRz7 `Wbk#x& +󲉦;"-Z%a5tPђ/ԑЈH}@[@1#po<߅i0P'Q.o3G+C t;=!5A;Wf-誎Y@g+k ƓaQShWzZ'J\5;mHTHN$"gb]hf_?˫FRA5SUyWYEgQ+j5 ޯV7|<\ i+$yP +\RqWB&X@de; 0kCl ##QTX'Hl2)b}zpcpl+>)5viYުX* -nnZ% l\[r$ʎd e2VL*(|$EijFO+{ϒ0+dV&S U4^9Nӧ#8n wvqv-sz]>d ~mAkž%-(pT ƄT{:#X^ZSLΎ"+ ݅N{%披1N6&#tM# *ԃdRﷷ&_`;oĐYC"Dh3C4w\;[Fjg}W1לҮ(|+a yS{MljU2fCMA@Y X40CT"h_EPڂ``-ǠCT%?t~ D.(En()NIaײ Տ(#;$>--W2<%]P1޴4vނAY$M7Ԥ#_Z/(=ѱBnƭF=-v&Ґgw{#5ߓݸ:Me7|t zI%! 
H8TQYAkxGH\Fco:7?|V&N joC=۰zZUЩFkyz?/XSIU՚bu£50-Gء19nxhu56ӝp(̀J|lT>" &7;¨iM+!u.dۊ]ǐBUzM%Tܜ:1+ݶRb08`*~@5WFO`KNR{Oy$>ȿ?(<5Grm gv&=GMr({&'o'aذ>L {*Ol=Sb,KCc,s1ssnb.J~ݴ9mbJޏR~[wb ؐcoOn{Tࠤi~H;ޭ&-[VQoy/"păѐ'h|x.Χ%F3# z;yP!48OTi^68/Z,zo3=>)vC~{܉24GC[Z)-/\%> stream x[k +)$%8]bJEQ'-QI-.R$N^t~#yݯo'"_k{: Q__H NL/qV_S ]߿o_u࿦~%ۯ?44B Ro೰2mrJjVJP ˺5͉&eTY\U4^S"iV>|~;0`HѪYTuVu.fINS'Y\у2bYi)3haMl?g'v]7g鎓z+ݷ"]V[6Ml[!ۤl(LY41;<$䘒L]=}?b,Fs:+糢f}m6N%ZlA8TydקF\ yi) 8 p#f7P6V@Zf򍛫tI[WuK:-G֞l F \e %hi!6) ed%]"$=E@(X8RJc5k!5Уn\|k=FQQj]JHf/+rϷs$w+1nmӨ:qu j{Ŭ#kMÇB9*Wl 9۽$B8(Ά 6yM'vr@"^ e1mh@@9+i. g7J}kyUUr }V$vktLNe6.f5)` SFM ы `$0d= :`8op_`gAD~1kq+F+œ^@2_"]jf(:Jwvf{꯹2J1g Z/jc\mKd}_u1%"kN~)WSRN@܆/@h@8Øl'Z17?Q(@N!6C Ghy7]}M0;p΂\z`>V r"e˻pMݻSgZl(;m}Gfw&&B''91x L4V@mTv r9&9Y M[TԬR &6:JoHiC}d3',KO`+v;ڮL Qј1e#B rJيI(/+_(l]G\Y[/Ud;=.eG3n\Ɵa./Sɝ($**.2ꗡO3T9\@=wr@p; `7 & kʪ".3XCŞ#D-i?Yk蕭UJ1KH䄾*U/u xWqs!B&_\G^mш7u6xk=GOc,Mڕ_ /"*Ƭ VpEBEw V)!fwL0 "bK[1J<=(r~;p N0W9NaZk-ZX(g'9C O0 )MH˼X͎XFMR1 *%s=YCEruB[}cZ%f!vѩpBB^EA?g9D /y^8LЉ_sJpJré&Qvp`c8 lL ܐD+VgZ;BkowXzwᤱo K9y譋P{ rªm(s=a*혹Sr]$ہM]LXlk:O!$9ne7|мc~[<sq' 4oQ":#({`X uy]r"|ERAKbn: f,&0> T: jSPf 2Ԃ .B/·_v bz_ "y_<^<.VomYJh $\Ӗ婃p 4ȍ*TK0CygؑT]ҜzOڏ( 2(Epjw$j,N_E2k40 * ^ Uj@P.)auX_y9vNu/OmC\\Rxp"3;obC6֮`,^7"򢶰ZÑH ѻ؂w9z0ۮZѢ֢߯O)u,!-7guuBa9zyR.~ !q%ZJXudk8\Ig꛳ve z'G;^O-+3cADxxxMWm饗##=aF6RVтap{zЮFas. @-W.I\?T57 X1J #' . 
~L2,{2G@n3Xa[ wK̓8X/ͬo>8#OvayE0Yd;RY/}&GR"IfJAy F9SVӽW>C~P¤f:~;`?ߌ+*A9x̗é cYCo/TV (NL=~MaM ݤzK8Qihz ِ´P =%Xޣ`頶_'I)euJt5[Lv@[٧.EAv ]gy m61 Dn7ΗXK~ƻo `{ǝ+4wP2q~Sf4sr`- endstream endobj 173 0 obj <> stream xZ-";0@%/Oqn]fHxq{buXE- r~ߖ~R"ʨoja/7|Mz߿1aap?6%rqC*\;xp$nMM*yЛT /åi;yc[2LISug~KAĚ"-&0XU )_ (yz7sD*{R3 А[pIaXm/L1Ng*_-UJUR!\iSԟ*~be'Y߂03=TLWw;kfv:lEvL5Dh<^V54g]HɪT*2K:Z+{S!jXp&-Vx0sN,f IoO/ pyDr bq=jJuVEZ~_HPRM$ ?嗜~ 4˳KKX?OD4ZfC%=Ӛ"YႫ*N$dtE0 D!]2&!+>c `BE89p!Cbּњhp?#?#+ y:  k.s2܆L XFeғKf'%jָwORUKz7G%@|;NDE6"ֲ-PLAYZF pٝXR(e7g'UP'ٓѴ6pPÝilvd%JֆkY;)NǬp&nPB8:jH^6'5w=SƜCkhsKHvį \ ++^ʤkPχh64Cj fXY=#$㷗$yO6!q`Lq<-nLŞ5$q]aPqf[5l Y_Ura-wgk%Z'/BIGs=$wW6|6"h,1VOSּS ;kPSFK|?4XazG>>K|13^8=psS֦)ٝT esm$}1Tx!\a 3ŻcSˏ)]RyڕĤ t昘`&A&-"ܨ瘙L\X(%r =. 댋99t AAA p "g!7k}J_뒡:48g?Xe&I@ҾV.mP*)vGMhJ0eG8\ J0ɧUR Wv5poB >ch~գ# g8!_ip++Sz>(Ny8dEǤ: s8اaBꍕIǩVqՌAѽk$7d;9rϺkG;:9@sJtDȁξI GRn܂<6dP1}ZeA# zۿT?OE}x-S#{(2OX~[Q Ϲ!h?a_|8DRj+غb7*g 'Z9>UJf"VCǬ!'5h/yׄOpb噯|7m+I endstream endobj 176 0 obj <> stream xZɎF+jwUW t܂r>٤(95F CKUuWQgog+.??w>I6Q|ԙbyǓjC +k|DfpD&# 'rk͏/*T,Mx}aLUV8N~PȌ@a*c=ѕacx< qEMDC{-rՔyEgtr4,ƪΓySƚ+=nd'=&y,DA&?V?3-%'NqPy0Y Ps7 :78=Z6X*N|%Gu87D4휡GZ5H ;NYO&OäQX.ΊJ@ /DѿeEDpB8U#GaCHN0 QF׾RS`-t5PF WQ= j3Jn+D xbS_1@e|:m( c^t"BOuLod©#_9jW< )< @+PZN8ObU![SJZ8֘f&{&~.``=检/N7}C3;%:N_˛)ʍ ;$rݝP"k;5¸I=[ûg^G:ɣKiI]rY.9Td>"JiN v:%QǓQmioClA+!#"L _64ͺ d:~5i" ig#jyjBpТΆ[٠G6b0Ʃ `Փ#qomwm Grko#-m g.Ƨ(e^iGȍ95oTT6*IW*RѹJwP xS_-TXX4,m{gXP|o/It[VN+@i[M/[E@z:ZGz㷽? ~_HBB(' g(22_x0IܝOӿ9~G endstream endobj 179 0 obj <> stream xZɎ#7+aDp u003܌T> &,)݆6JQ\bx~hGCd|7n>85f. a8D61{z`b'{ʗί?go$E$r &s,s- # yb!Ev<7ThM6:( "HO!se?~CRO/"`Y~y?N;6C*JPTYlΛRn ,_t1ccZ1'4&r*;Y0=BPpK)W{˧RF̑0=0QH=b1*sꕲT(;0,EUPXyp}ۯJo6)/aO`* aXo%Հq{ϻ ޯs߻(+zАa 1R廵7vTU!\:{։L`zpN-H! 
x8Hjul\tؖA D{q\eHٱB0X d =ҽvך_يIiowh`'B&RNnSyX}R;KEƪ!)!<͠isT]`b5њ7`Lf4ϣJYL!- WaP$|.RHR}\D6G JZXy{-B#"ڀs:^uߝ_ʝۛCޜy7+\rǭK SSvSޝ'64m+2e:/]~FmZԸ(e85QckJK]d혀4-93rne%Ku`qF,mL~7PRI+ReS*U..>:ʮL%PcR7RNzJ01ob +-l+H}& i'`A[]2IIhX0T>r[ZLq: (͎MLwAݼhGde5:3@L otoI薴>ʺjBʨ\ּl(Z8ẅ(MeV-'V7u87P@Ϛ.ME`-X@Vɵ*^ztW_emxpeYU+VP:3$Қ=rЇ2uPx`h*O=\wn&D{c0RS :Oab%G@ƍƮ:Y+"6bZ<PJ)I;mF{,8w`mԽedWH±#qd۪' ]॰DO 8ds~gN&]?KH͙Ts@f KD 2mz^ik]Hھ@h%<9yb.\̵?,7ⶊ/$Cm7 endstream endobj 182 0 obj <> stream xYɎ6+M;0C8@nFk.O6RKm-4GR;# 5 9_ukiH5Rlv>`y=6:!^˧8<Ǣ41m[ߚ_^2PM6@vO['l>&H1栂g/ Mxh6A3+$=OxHw/]%:k sǠ ݘHA`&>#&r:;#BREȨR l'"K"Z]|N.ص jubM#lpm6H+pg:H7α VrE. FΌsj>?7ЂBOC*%ЍVlykBͧ6G&:P2 QVR6u:@lYFƫMR8}F%GJeUdȋbx,84i0W4[ dFr@`8j q٩&Zæ؄JX5T  %bB2VyeN (C)( !ez3.R;pLH'9Q>-ulm}ͼ3eE:@4vYf"b/cۛ2D6)q'^8 2fd4/{| Ɂ@2a~H)-R[ ,Dhj 4D&;QV-V QSWElB:eHJEv+O3 'VшEWk >hL`բrՒ%F|N E`_(5!Ől!(WF(Q؏$ ] n|+{8RZQw, hFŐ*T0Ua@9PU6P6*PQ`k3EF,j.bqC;žb;0r7c18fC9rOS ñ\3ً /S\N c'`pa9 eeo 2U=̳~ iHΑ$N1 K.c|X/BƁQ-i㍴iXZ7HM2dzsūYz}4: z=06s& ba96AW~Om]_HKld7~0iR=XhBx JUb|u 8 1YLd) ]KPѻ_ +%^2)2i>hQL ԙX2㫥J& ֭ qs$hK@[“> stream xZˎ+0]EwT6wY߿lU  l=HyO>s|:>3 '';Bw>P䉤4^dHu(.S93){/w=q@yJ[m 6"Bȗ& ZHY5JFuZekBe6̮QG!_@r*o s2Rƶo>^ <8e"0J\;..F $FȮpeEPfF2 q(*ϕ}4bo F^Nq̽>7!ܷ7y6׈W"ꀨ%,:H-W<cCJUtͭ$i:Ycr Ծ?Q!Fb\C'|ǒ(&v94 4`PxduN@0 ^;ͬ +TV7GVltcCr{3<,^_v(o ;Ӥ4F ]VIhNZ`Bӄi8 SvhMkC{Rs󻴤Yށܭ=׀iHQi `X'zV"NBZytgJcM[cў=DDºej4$S/$ph J&I'~ȶ*BX]Pߣ{Xui-/p(G6-VA7\jѽBB w1!H=C3Xd?ec h2m0 ֐71l$ ud>v !} ;!HO,(mqPlFۈxY@E R1Ho^\4k>xP,W\VBLh*sXT#V#LHG.F9P -vXC@:r9jYSiL#Ą<X[(}#w@"OCY79Lq@44N%?f%GL!~,6Օ .X1z/rJtg[d*T.+r'N:"r3g:^A5(L] ;Vd/w!ѪŒ=rB%a  "BC^Wo vM6*ȍኒn)}>AZ,ȅ $$WB)YY#=6n%QvEЗ#_,FY?gDX ݟdℑOHbĦ6je,6!5ImMg3D{gx%:5 ϼmdqs۝TŰL 0ݰafȲɹ2hCw[``oIoC"osQkcfZ,ZGrGշyyg#mDYL@1yMf+یTk:%ty骈oI@,2wӏ*.eԋpq52aeMc@a`+ˢ}YE|_?$gWlѶSqP5*ln3#^ M!΋%D䚣?Zځƒ{F!`EJؒJ@E0IF.Y^L[%*$Ynotrsuhbd%oQK^;=FXkhW;I^mSJHqTw/QEOR9lCUmw$Td\xpjtuXXu#iQ/9gGQRH bANbIZ jhvT?oT)MYhjN%2\w Wă.-W_ endstream endobj 188 0 obj <> stream xYˮ6+f9CE@wAfUg(eӶR$y%>f3H:E?ᯟqq[O#kԽ$cYMFNmcm8o=dnڐxļ'6dSK9ts.udeV"1}:bB1uP22f' 
}!4%%౨GVd=!D8>k1`?u;=B66p+w:D8x=B&8B$K5>Ο_6>kL8{?R0>1C'C eZ|PB!sr?^3)K,CK$gbF Iɭu6ؘt?H,ӎ"4;:a*X۰%.OwZ:}O8֬Zj;/3C&Q3&G$bp 6Ao].mLqk.0hjᱎYXZIG1hG6Jox#^ؘf6 u#ٰp6W<<%;1 &'u hÊN|lpjC g4',0޺ J9կK(qE5C S$AD^4YsIC΃5z0{mnGcaƒ'䬛GP 4T3)|;AkV;=)$+hy$#iΈz~/l/Kxee ό:yw+H~d B=>SepHw ͂2D$g 1cn{fP$ý \`gu/-2xDA gػ.t)T:m8hDl&u&^+ (RH46L_PKF}1F/ DP>%7!kR.d"TS"FɀD_#>.9-8RfqNqwu Z;?4v87vL52ESES_ʻh?r䋄cE3Y@,\£* (ɖtRI|_bBIB ]R B ZȭBϲuZc&Bv!W0౱W xuO!ױIW9xS!@!5_ш[nߨtVE9;,.J_ȼCØ9ԽnNC_@(C_CzBE{ʧEڰhULC d8 ]F-RhIOU*BhGq$DtHK;?s|/'[ԓ 4a,p @mh! endstream endobj 191 0 obj <> stream xYˎ6+fGI[\=SMdS`W棻]Ou>⯟qkOґ5f^OE1⻍o/hvCzNH;aݰ$y/ &rMNӁߗ/u1E_!:g2җ(ٱĞ`$nsk1q:¡t-3.vMJرS[~>4aMp l25 qT9N#)32G_ )_4TEJԱLvc Dzw4xصI' Bk!6S`o!&=F=Z3 #b #6YM:^"xg}+"YDnG$0H:eJ09_̦Kْ43|J_v`ť 8lxDKش.$oxg\/ɞ_,(XڳG.mI ԅD4`GǧqQoᖵ _ΰȵS L'c] o(a1$7vo4>'ED F#g>ݭ;j7N¼I:+T?5կq-$&" sCxe<ixֹd8J g,LW%mYą760>KՒ83ˉցNu1 >ϣRP.IehC KؚAMzȾ@.3Ԙ @S=r"mRź8M*(}7eبa/kܓ>%aQ8>d `MK52xX'kf^)1OUa=S_YwѢ9I&Iy,+~oWʆbGΐ{bY!࿪ͮYc@S@7N)Y,#q DjB ysnX> P Q/=Gn&G%oӰGVq {pnlYdcq*z4XsIי$FyD8\Ğ}kͲFY.f^q1NH!==}m9N lY:ĥ%PP*my B_nemmRխܴn32"oinVVymм(Ʀ0sB4)w<# Aj0^c俦.ݫkIoE,Ξ!rMѝ N1z=p_pViML߻o gb5]!2+8,[D&ī-wE&N&[" ?;m;⯤2PF4OCAwT90-guK4f[l:bQiԱs^\"o5RߌIP-`j\D[cfKoMPvhչЋ8:\TlPL}f*Ʀ+:VWEn`8Q Uٳ[m/'$?|7L endstream endobj 194 0 obj <> stream x͙Ɏ7y~Ѭ" 0!@ 7#s rb|/6{]cv[\jcGNuv;o'_߻O]Gd{?tL̩[d}כbcJY֒ݯٿY"Y nhMDX+vh)_mڝlZfk* 5 t$J7GC}?V $\c{zqb$<Edb؅i. 
sTܹOtkw`M#i?A&vW6HNF\ned7$.m;nZ@8|Ƹ̓>l377Sgoi1m§WDɫ RL íaVX@c='J: &'7:=&&u.rւz!u=-=x`Ѫ4l0 @];Xa  k.oA]Ï% 3ŃꛬnmEU2A|f55Ġۓu?-$n/&q(j?$L6fǽiz[>qr*dyt>,lo)O%0܇ZzaS2j)㭦Qm"oۓԞ!]1ȩj49Z+*VD咿ɟ.T Lv<4ƌc9S3TvHq{78S6ʌxʑzʓUp/ue{i},нYXrwka^K1`+p^2 fhNKHɚ8H)h0>WIɣpvHǤLJ=4$"dZ5qEީoK^= f #sa]  3T #1<0E^df>]`i_kAf{E.0 ,;l)kKqUCU8),Eex~EiWy$IzBUHyRJ`Gz P=*5P/4}~V@9˭xH.8Z!}X:Ÿ&91($#^ӛ]QxT>26yy.KeML4iJ,T$|Eit~ Gblh@kM@<9R૖Y0F(:/MA ڎB c#cG4#%i* _ ~?0蹎3^&[H23 ˏ99OuIutҲK z󞃔zٕIZM"dn# endstream endobj 197 0 obj <> stream xZˎ+ +)QEL]dPzU W%鷉&4E;]~_~~MdM$ĜCH&zޯx:YlL}0_eK-ً£dmx?헭~߰Э^O?˔MTfs41$,]lD|qy\z{]H#C{C:rd.V EXA`Ins6"96FGyت=OWx]zӽ-c`&eO= LEpY~Ĥ2q$=<9\7"3#BY&JAg!,Pˣ|%Q36"rp׉'aQ.Hٛ!"sB)QUљsg` !8Vg/=9I<<a+re,.r AxP]Z.z*.:z _LbH}8 9.=V3>,WU1UrT|^{E,&TlNn \pv9\bש>?G::dVMC}a~Wfԥo[6H=hko ׫L,awE<  :F@z.kw{p ^ypcpFWz(Y@?GoxY{{(/xyib+7u nkljIJs+-S&U2{c|*<렛iH@+S LL{{\Hz>4ϐ~|v0=2C{Jg9"A;Y|yH(Tqq!]#Ih@dPMX6}h CԷq7fRm8l#4+Hh,3jѢ Z+, 5ƐO4A'W\sm=!jXaH 9.C_ ꛽Cmn1t̀ dߦ6ɦ_f+aqvF،\{=B^W-tP+y=Fm)Lb\ȈhTFkђƽ|f\pݐuHsrN>5- צu=72uK:!dU8ݲ11(l)5eB;luy\Zh=$ ifFG 6\p[əXk\&h m ܞ6E5QY04sXʄ*A!tS1F [ iK?Uy`u]c,8,Tb/S+-3K.nmԒ0 \r>Ю--\rOY#ք 2 /CsQ?,U^J*G4W"aAY[i"s:WJ A^[ׁD:ܸ׍tRiEv/9ϝ:ڙ+Mle#AWW}A5C6}VfM B0+3CɫEE LV?yԼ+7hU8d|HcIJ-'2#L22,!6uNw\ޓ9냮(h̳zt F@W*˩qFmuRZcši Nl>yj*lȱHgihX(^2 Z)jKKbi"ֿ'!}[z9B<;knoH*jUn6i!>#1K=N/+֦翭.z CDc}']q [d* 8KyϚnQU߮ /jխ"TZVj[Rwe82uO+ů]l^NE-]OJ.?m0j5&ӝ?ۍ?qz숗mƮ+_{E ~=r/<~YNd {o8~-'o8?#ò̊ cJ__&Q endstream endobj 202 0 obj <> stream xZˎ+7WX#-"$2ӻ $d3#zTYot|]UVI$uHRY~_h+'o_X_H(#-TFqtWOGiTW:Fx:(p^N-NĠʋxLx3yJEBKdl7c, k+󕯰Pq1n#^)c}ސ_3L@z( =ux C mu_eQrIC8wt&%!cPNcU qX T[e"r['c!Mj6DQϠ- c,S3l{3ߥ;tS:*-ap]nsb2vR'#6NX(tP겡NvcT<Ƴz1cA &F>l7m(6~v6:[/[!orX5MDUڣU1_tIU|[*TRfdk 2tR'SIl~bM`%F,N jV_D,Ft߀I9E5KqdsP .a7KiH$IIN]RN,"y}O{AiB2 exA3g5ch@ ,d U&;3d `J&+u|]Z$Hϳ$*\.\ZΪ'eM#]YyUcE /J#/3&:{bwUjޏ>e eB*VP.tx&S f=KX\ ^ekd 5%/\btYoЫ@U@ iҊQrg_:'x!`j +n:@_ HPcE}}5}zOc qo ~[UtHV钴)CyZ|Ƨݣ,vgsĽPD$c"հoڊ"FX@96)N!2am#v=nBo2u@$"=Ԧ+Xݗ&@9Cnf % n[,Oåc%' 
wmAYBsb5p$'-zSY:DԢy'+XB@1_B.j{acX2/$co:y[dz"DS\~&BO_2ɣtVXG;xg=Spֻ0[%%BWOh(cfs3d_yw725}2@!{dӰn0LAlQrc+ԃoͨSdn0.b\C2:tfE}%fF3:}$BYY@ Kd+)QTv(`ؒFV( K!V W,udj}2.pKKi3yDE8+#"Yk.B1ޅj3nyU%ɥqFJ0B^ݠ1ӝvC-~z #Ɯ~됀V6D1݁ Z!!ؕBF0`pN U }INyD1bSFa4YHCɗ|aQ\F3HG>n[MmKbQ)4w"VXd㡿 M=</S rq [ r^DֹyJhEq$bE>unV|?69u{X$1ikBTdp <0l|dR[t+e:m4:o=WԣdN|VhVY]w#38xA6N E|PY,z>~Ơne9+qu6B+M_ӧRH5֏PVL`,?rC@s[}HlB[7[2H39%*A#u ML|%CdPnѶJ' N[nϵa~ZuD9 I ›R)s+ϭqvUyU%ZAް75l֦I^鮧3ަ/w;o<^D_=8 Q"P endstream endobj 206 0 obj <> stream xYɎ7+D 69H\CrnQhl1Yޫj-,H+o|ϋ?=/>BRDiy^a9 Q%$.tZ\qqӠ.i~BORkYqqYMl H?xnϣ$*ofA)$TWzVLyjvo5e)IYS_bW$JE)CCHJ xGp -5Ϊ; JsGlAKU1ٖ/R|VN+i8S׎M5%mUc/IEKeI\v̌n?;dgpapOÇ[N{3ڑ.DzıwQ׊Asv%?Ji QT=EctdD$u?a!ZQީf>x%EFy\Px:uM'HZ0*aHm3%F/]i^KIiSroܠl$ OiS_ibS=VRZ"5|hth mNe H6Uv*=T,Ƴq9j-pjy3"N-Дܶ{Րr4*l6Q`f7I%.2kn9w"}e,NAN1GO>XF#찼+aƁ|N0b==fYV"Ezfr=ʮ+jIP ﳻoYEvЏ@)oeޗ. ^T{hu ruڀȜ bp;okSneRkSulo4)Zz1~bF.gٛ:ju8s0kz3Ay#5::):%C9[P,#u]eqpĴdzmw6;8;kI-۝*:쩲#ZŻ- XU3aAce^H[Qr{#ߩ4rY"ڙ qM1%Fc Yꡁq'T(΀)zC=tMh@H.=`F&u@zGcڷҝphs%ݤ}|T-ɤ8diyʔ%@vwi@"HV+h7Ofn 5x^~dwc(#%phi-`_rc&4B(&/ +lWZ#mAO7r*wp$5n[PFs2!d3:Y?܄bBDsl(/Fy_IOS^HaTԲQMܤv$Ev9KqhwvClZqdѰ̍qt SVn52#_!̒bQ]Z CnL=la/Iu@B&ߊ}އ744oE-p^5F2#/~[w-㋕FP++1N^(|e`"ߢw&ΑS8\g;]y(FS!ԛ'M H%H 6t| 5(zt{0 ><{:7 \G: b ?8,l^f5T_܍q /hK?'BaOРbPYBaru1|?0 endstream endobj 210 0 obj <> stream xYn3G)U'@0X6^e $Jt`'9{(QRsQw}U36ѤGg w?bzϧd"4=]'+0.(oxzzIkmgy&ҥmP*x^W݊UK=5"D)9lrdB3f )Ȅ.{@&Y0 Ȁ֑m-}Y8]cO`lhs 5t{qצmT^l2$3Wp8$F!{2vn6ҼCǷF:A%|t;wWNUVY -|9SER0="Jw sb-\XIH,å|dفNɠkR lDl24.-rPcS"{qzl*yxƅ>-݈xZm-4<%"_UK.ĸCab}F`AF`lK{5ɡ(43z٢0tp<Vʵ-_##YûZD`pן0n5MynKNhk>оT>} k}u7/_n|jf rvp/M endstream endobj 213 0 obj <> stream xڽYn6+b;`!@ A| rǹć9"^&$F FB;h0 ,m"%(6--A'nN<sGoYm3N#CLX+[x)πW@TA[ْngv.+ xN&YI0y&2-!_u1QF؇,-eXHT'z{MA=qn]u+nMDT^CO[C͛q',nNv#ҙ Wf%Tf}ۗ.?φ=Fgem,|"vDrQ"maq]B"p 6)w=|53>O` 7D[qvoj|!<_Cym;;$*_3n `!NvGػf_D]FTxEE5V߱wr5 9#DvknI&"vl!KGތ 7暺1o|#"OA(TbOdq_$zhU+=,[KW8 q_qlԹSC\9.Dh1M2uF92O(OLbS~AX=oɤCMZ 2Uv@:3;PIY{+Ep Jn$Zɥ(Qq՝ GtF󷺿Y=΅x+OF7C9czTt #(U#L٦Kw@2fpf5h[x4x٭5`Hk1X`؍/Tihx،^Ϩಿ/>Լ! 
zd11DN1RMw?3>lĭP_29)Yҍ ], 9lw3J$C5 9'!ٶpJ>|ݬO endstream endobj 218 0 obj <> stream xZˎ+*003fһ nMbfSDn_M-HDxTω&?X_No+o~'"@F`Gye3U+,.JPO4' ;(棼H#]!ϯ.CZDtoӟ`C DStXbQAmVB[q4lҳ}fL2.]v،H`Ϙ0ϝAi@=$a.,6^v-Y/ J_J+Y?NSR:)*?gqο%qOC98KADXL#^` <\`wng>1lkw'$ ZHGUBuRp"[:J|rI;£g\ު,p@}!ridרa*(G9{54V dAue1 \hױ|rx3\cA3 [h(p$EW:.G Y>zoua~6+@V s-(:˳Z Hv`;h=Bp2uRue6&.ICN[TkkAI>2ptC^ъ(sZ5YE!^řF<5+8z'@==;Ce$0gQ{ U TMߏKV{PKXBI`KK{\:Π{7LKBRdHgԦ5,>B b 'p~@J!E24luJEkZ V䖆-1/#jbX<:Te߽{AgrE}3 zBNhPTu>Q-8$ʌafGi[Yӱzsnzc=vB >V̪+8X2*VsZIXg";B,=ΥJu&ġ2BKTʴVȔjKOP UE˵=RZbQeǬVI,ӊ13tvҖ)C8D4MOkۛzi!-3y` ײ+Uas*Pi7P*%/|iᭉ)ee؜ۥ5,6a|0`_Z8djzz#agmևF8M% ^X20í5W#!YHh=:ĈKFR:pE\֕y ˶X|U_Qus=G$Bzf6}eʎΝn]ܓG[q+kU]gnmk\/eN-S2̜ ή~؊n [eNENL'a[Vevf_/wdv:xSk9<\>1)*df48./5#^k 4foel|_ϷϊJۈ$:ؙDM1V* Jd¼D<)ߝ%+Sh730UuƇv1c-A \|nҢ[r>N e֒hX2"e 20\S_[:5/ n䈪u|V j](]YStwF>ʝ!FyYyzhJquNbƪ1OV*įNPdt}j,y=vɮ.^(IW bf劣~I_Ppv?}W7^!!_-mjrCW~bB_T.eUJ]D)to P2D`^쌭9fsĢf-d@I-:2kpUGJt$#UH&jI%)b^\%kcYT\K4ߞEbZy}F҂մ GQ_ʠGQWOBx-ܶ ;eВUB q ˜US}Q*pmU S-,wG/Љ0V._\Qnn6m/[tKc \nEL]|ie+#m䉝C.<#rg[W▕E%n[[VNv4~ôCcS$)J׵qز2-+߷D y-(ɻ̃Za#f67|p"T"Rs~ endstream endobj 221 0 obj <> stream xZɎ6+bXEC$@n-ȡ%sS~?(RڔK iĥz 4X!2~?Ł6x;c&Q=ؗnfKd=%|uCCc4_6膑 ؏,h#E) qe+GllkT9i_"pL!/b7b8>SOd"AжϪ|e|_G6QBbσl8':v}F|Jro7ѓ2i6pbW' &`Ba޷f亱p+ff\oq6 dkUW֦[xlL[H=?ڲ7"dҁD0k?F赀i0'և"TL-FpwyS[{;l;u9𷥷\)#:OY168kXV1h3b$>'|Ϻ sq^Z>lY~&%+'ZnNîo9qop*O/"L,3J%YLlq++PRGPŻ]F@>;`AtyC n95b)ib#J~" YZKYd]~-|FzŸbxnU{':c_.`$]t;;(s^qdR݂J:6kwTvy.Yה-[ vPq=OBNM*ޱoЎ k\!@h$Yh_r]pȞۘ=b1˹$. 
qm:B'h+cdVZz@Mwܬ6HEyܦQ؍n]Nc5Vħ{N'PE-R>GpdMW!=iY'|8Lo&Z_@أvr; q9v]eE8 +E pVe4hRMވX*A<ǩR2u :6oOJ LȓIb[L;ȑ+!Xxr/}Tl/ ~@ }6\PZhG^S9)N}{G3*Pm|+x72%_[_y{y&aI!/*2'@IM-m/4 Qڧ`зNuj\dy)yGT}։=48Xm/-g^;`֌njȜ ,Oss'GiyETށQJ C .YJLBDݝD|whs[d^JCI\:m ݓ ip7y`-9_!qF םxcqk[9AFT}6Ԩ{?QΈ sleT49!9}[ìqxS{;e@2a?mPM;X=_ $퀑fI[ǀ zտʺKxIRudeC;74+RI7dxY* |vƔpO .Ӟ endstream endobj 224 0 obj <> stream xZɎ6+bX,@ÇI[Cn9)WٔeVsjWE5j4Q 믋ߟޛ itĤ VE7}ݟAyVV:<7QFvbMuږud S9k18>Un&4VuۉC^⡱i S`BOCdU )|mFe8w{""2Q1.Tc߰fM~L~\g="y_0TExn-*Hs)Dc%)oaV)b ye ah V=f0gCj ^E ȧ{G3n:c>AidUq[ea0[흲qyh*oaȥp5nf8 x]!A[M28Ӵl2.CKnnV^^ fy5<}cBRkS!i):hҬhpkXep OeZtNvLSF*`HˀWyix9j)!EtXAAziNCjE"\DXRZd8Ġ"2 Y g23E3Q&(C8$w;HGb$1_79Z 碫)JDC^DI%c',/Uuԇ@uIu ;3+؊Xؗ0X0-`O`yFr]+fbvEY+8{uT׭ɣ|F3|2+ t!>@IFf)r֙6sL C-x(0>00b8N^/yX#p%wd=J33T@N1 C_  :)>ҽ_l¢XO*0Hʰ;34*& x&I ]&<$'@qhћ1b&Ci+ ";kETǪ^uXz4錹YOU(ʍ68{޳ RRty{-"F(0LZާuTE}ƴŲ=y,_Jz{&%?4cJ:}IߥVUo=NmEh endstream endobj 227 0 obj <> stream xZɎ#+aШCIU0'_pL) =X\b\~_hGKdg7+>_Bd|X31|zr" /;>qo6Ko/1~YGjSˉڏfXǰ?m95f$/$R|ȁ)_Ij&:[9۷oE3X}oGN&_d ;Ry!N`Q l/MZsxA|v3CӮ3\6 uѧH=Y&FF 8ζvl,ѝ'N F?$$pssQ<;6+=nG6\~H{d<[2o{-vCYJC\ɦ%F"XաAg<8(uԡ ,g4 f!Ȩ;-[1J,ֵ#w"Zpw/L 2n'k8SXU jXU+*"@wG`i.PUDgsWĊB wn-J.3JK?fQr֎LLڍ>:M/*c]"O!ƷakkzXNT?''&ޙ$D/uɥ7 t:FYޙV2cQa3t|L_-'3-^Ӯw6G@ ;"O "- [{ A6#qje{+AVZ-0:r~K&yGɄ2pGmzq:TR>hɧ->t)RvQ'6N"KL7nba$7,2^JHqH@]8N8(ź;FQfJ(8YSf_zm퍊(ى9ݣ㨿h?bfѹ实M@ۗ^֒bak=#W[#,jjNDJc0ocjN#Wa"r&_?䧌# iQTa:X w{NU`S)c=t۽[}/E}CYړA@4~U$ث훡:WJ^_ {7S}xP*u/+TuX=x4U+=ȺZ Qy.n}յRdP^?`C ?R+wZB [H<v$y@Y-)Wh<;Oz ,loqKZ ?=W;n~fŠ[V&Oڥq 9+OVd_Ky\GhG_0 Ϻ}NPRQ M~~6pl}[ԎNm!h`+tCPW8oM >Ԓ"<8q˕ hׅѯb?yVؙ`i'ĶS<)g2vȴ+=J:ɴku>}Hi jhw<^x}%@3sis]UoyfvzOAK]z_/J endstream endobj 239 0 obj <> stream xZɎ)fX >H- rh0yC*.?t h}B@3>oUp IEgNZO?i}j{v' "}g; l k : U vaLoKXM"2pkBI=n"P9S&g4eLtKEe\!TW:Z+uuM3=uXaL+^Xr[ؿ6fP'YpTm%3!YpP"ۺqKۼNc[9m,-o?>}!2zU_jz>X/"1v?B!bC\R4#L`wh7ji(H&b I-Ѽ3a Iil]T? 
-ہKZo X*K{J>x4 uLX  |Yض8$YMaUB~\V.1PHSu=ؿ%Ų:ߴ Q 2`:vEZی59Ndֶ2T i u&zp"|y4>_aS7>rY7VJڞ {~O䖱{5oZ 4.A;T6{NR;cr su,|{d7;ِܠ+[섫~+]s~^ؤQG)& 6 &id= N_[Cnݯ\^lqe{08qG*zH}HQt΢6&ű|[0Kn5kj}m\[gqMX_mLjUjn` V{fd5ng1=SN-y3] \8w>4qck,`ŋFXkf7?r2e~î&AB/sB|SOсRlwz Hd,I7mː/j(PpC1}+nD# YB>wԫ.+:._\٧XVxBw*\7yKGZywe@8T['ucFF P|f G~L39aX H:Љ^*x ǝ3QA|NT Í[WhN0ϙ晹ѻu+srqv71V̀:~{f~&5eM{vxLV].=@}$N顑4b{-wÁ2x<4W4[ShNǞ6`7n ~u+lݭUGV&8?s}7'/CwP cUvgŭKu}KPɸxLM`4cRcUio9m)~z}رwA*!A!:!UI] Z,Cp7aTv^pt{:|tX{ߜխ$k8QCz ?y9m>Rt;ɩse1 nʄ٨|=>xd՞bw[LEHS4gFj YZI~LJ~[&Z ~c<~~ _wH 5ÃGN8Tw+~}H*,$U; 1Ru膁 $z*x;rf N-N cU YJr${;Vȯ~^j1|qmf1U ;\V|x~_D\m!nmB:G,4?B񠭹MqY)9RC(. 35&jd7]~{4OGn3뼃1ZDfpʷxe*/ONw`GsھuSSh< wM[dbv' ^Sy-Q%8Q˘?&H8?=ENϒ:`Lɑ##|C7_ jr endstream endobj 12 0 obj <> stream x\[7~_QH^2L2S,dVo̯c]%d[vU<sx.dSʸ*T\ʤg]YSE+kreVUhtXfPو#1X`LDC)S*1a`=1 @C+Lqє#Ð0RyqCH mJ"XI)eG r Ucʃ@'(Ŝ+ Cybh sIO" x>06 ~FgrU0T%+7@{'P# )%P$ -(anr5%8w11$3P M < \y80Hs3ȁ-ff+!y2" e[!dq0KA&Xb(vZ(4(Vej8` &>'0vcID" 6IDTۘ9 q A1lA}KH#9!2I2Dh4g>cIkV ~S<)0Rq|: "zQHKqϑBL@D  vE =UP b`;Ik>.g(r#sD G2ŀТx ޫi"AӒe^Am zAA $ǚ+Ȝ5ěG Zht(uQ ?-:EJ-|JRp̒1R S+ybX=g)a)BJ,.%pRֲXJI}&3}s0 2 f,\ 8DLah:L3 x%W Yh-TW#ܤ6ZCj`pq%r+;q|CKhaX}]u!Yp,X.(,!H*A]}\EtTX&GPp_<,׫ 0{e>dސXL8X`:CNˑӾ_͊Tτk9<Щ w+3Q:-X[,>9X^rS')DʱD }Qw MMI|[c$wmk}=H %Gx@i&3c39 |xA6(K U*L?g!Glo 6lh'dXCu/ 3@D[~Z?WW\+ :? <6d'>ͽ FF gc 3X +Y1g%l~b2WwYn YIIKF&O 5_1F7X3 j;2XIn*1'>=.\^X},k Fh^>#.Ӫ  #GGv.Л- .zNßOr1uU?9trʦT,c _4yȎў{H0d psm m 3*_6rCeN7>>½9j K֮NYLp(HXF-[]3=\)mkgi t,[c P`jw3a06y[lAЇeWmYښ ! ЙzcN)UiL;9> !)n -bn:uX~}DZ%QF ܗ)!c \E4^uRcsċL􁥧À*+q˫A,F&rò )NRLrVsRrF747$2jbI8rl Uwc X,ɹ 8S!Ӄ=g+k.K2*dpxɐm1.Nd3TRuՆI +rm_o@,a ,veAD[EJa _ d V#N>FS%)!tSO/\b6pG ބ !&-d*ϴ|9HG\銹kvba/NT,ҥm‹1%Ǵc֜4nqaxxxqysUY*^^׽\qў 6S~f\<^~)bӖn)R8cq c;pgN@NU?أۇȝȢ &ړdX}43w,$"*n϶/||\Ѵl= #I= Lgh<-ȣC^!-X O"0<" yE |yj{cX.]:58wṃ#?~ELnZQy7NjN=X7\>PI4z՗h%A {>XҬ:K^O"rjTVˇͼN͙|{suHZ}\ruV*eLyXnYEO̯a*&7VB[<K[Kč.^KxP9-73jVI+S*3-8w7xŜq^UW |3j RGV,Az7߭޿x^D L=:o l79[teܪ"dEf4!}9 ׶G#hAۡ-1raƏN4v{lѴz M`it? 
Řv={j,u{Ǒv=dT1q8'nn<ܴC:Mk>˭j&JM䞘P#fnįU7ns3E'5m*!v #2`3@Dk-4;``d$^ ݇cu9e K^ g A(7;T$ o;y0wzhwccf^l/o]l#l)܀*\ڽMRml!A3lt9i|ƭ`l`8 "wa>쁱z+{)>y ؍9  rԎTڻ/.1/2+ڣ>X?d"#=|"M.YąAIj}s%Qʻ bc px\m}Gucp!d%9oHpHaOytkj݋Kzt;<<|C\f5=}F{,{ύI~O=}y_I!Iߐ3pG$^|qE(2+3}(pm`eo̹+^vF;=I7zta*5&H^Nj6['ʗG|5 endstream endobj 415 0 obj <> stream x[KW1p_!@CCY;0$C+οWL&hW]=/X7p! 6癌 ٸ"; ds$ d PƬv68d8aL&X|-ou7 : Im&D ɸa ]Ty'B [gY ID)$ӓpX+XpCP,hJz.z=Z'J'PK"7DRָcxcќvuEd`jxfY_HƉNca,1;EXl!Y"Enr#RD1HفK$'n `1sa,H xr sϲN 3yyeh>]E<:x=)LTx@Fɝ@rN8`bFyTW=D1U b a"aģXbC<*HS*>P1p`ɢq""цI([o |(g W7Gꥒlf؊LEI.Hg܈¡vpX=`1{HeA@a1g9dzu!IdЈ0|,ztdh$AV 9HH: ZdxdMd l0wdh6Kh2̍04cI# )\lA2E:24[, SB)f0 &$K40C*K@x bp6lf,aQY ~P Bv6#\13a(`N0Z@}"0\āy Z5Gq},£~JA1X1>0\<*,%1YqbaQII+7IJ##τ'ю x,2?͚SdAb842ː 62~w;~e]5UG+%>|GAAaR*6=jlxesgjzj|2w }y:&/]3jS*Z#yGkI5l?j?z0O]fv8gjϽ!Ou//iyCGެhm)qq(11JE-6Fo8A@{θaO|9yb:1r;E!+`t {CV?訏N'rQtȸ4E?llV4BثҒ}Ԛhn}NhoӽJO#.G Ay(s<:JyE҄Fq3vї>ѧ-/܀>!'@rS.S~nq?[t>}Zsc| rz^qxH_j9/0k}+Xoϊ{3p|G;p?9<)yq?BN +4OɖA >Rp9Dz0tÔU]]f1$ pPIUfaqJIQW|NR*oyoA nTGlA LS'W4DA49ak5uQ]5نIE!-8bzjڰ~ :ChlѴysz/~^IH>v4^g?[۵\d{_-OL Ncd}99Ajy:М/DN Byg;.2#y+Rޠ/:7't+'ͪ*ՌvGemFӶ~jpg1JԹ0_;`L:`JM {R3ANܗHל1(lEa>#jUJqjqP)jט[wd}pq3qݢv:M| WE}F;AFoE6H׿o6p_]?'wPp4ŋE-!"ijrf.*mxhK|&˚kWjފm}zE+6?64aCoOU_Nߧ"iJ?n4kHi@1u5Sߙ3U1Sߠ2E%S1SL-M=0S_0mS_0yS7Ws1/b_y_߽|+L#4UywX_r7eK}_V|g=54#rSϙbQ3?]jؽ'z>gxgy'6e' endstream endobj 513 0 obj <> stream x}]k0$fBmme:حNX~9^,yߘ?͖nzoL[г]kd*`[7MPv>cy毞^7ymL_Y ;8[cLZXx9&Inʍ=Ll8{ƽ8F;A]Y)-fm}jJ֪9b(ABNCv P&"ԷQ VE5P7B#f4!Ő&"taK IfR ɁH)hIF 9Ȉ8RN)$"RDB"rQ3"l,lGku݃kPG/ vw. 
?k;W` endstream endobj 514 0 obj <> stream x]n@yi EM"DvɊi} U a0v.SUw~5̠cZUbJ`{8Uuٍϲ)ZN6ER4|n<ȿt4^S5k _[`rt]4:hEc{̕MӠmMVG6%*٥mOЀuX mQ)W̢u09wx  <'JH@*׌a\|$$ ٻ$zTE> E DC!'C oGKۙQBrQ@Dy\"C?Nxj?wm{ۢcumVpVC?)ڏ endstream endobj 515 0 obj <> stream x]]k0EGNn̖]ztBMBԋi ys>ݴ ٍ4*idM'kCLEBm'; A艹1ΞΈjEtf}>|jj|ib #lc1wuhnlՅۇtesZZK9i}<'IsUA-97|d]GB<@hA%rP`)6D%..D""> stream x}mk8S=ˆ% rw˥޻4qܔh﯇f$6nӵX·]w-Oq^ίá+S8׉yYçsW_wۯ??='c.]&˵{nUf%|]bGcxgg%RPceEpiz eZ\<]_GKۖ._YIȧei;lÐ_> stream xU_HSQq3,[eJP S9bku=8zI)| zAQ%?~hD㪾~_{wRqD&jK6`XF !>]E5*t1,cCyT,Qhx@$EN8L!3,H]fbo} ]EKe匬ffs5郤-ʥҲC'Cޕncu \aߙh߯8S9VCcӼY {gzL endstream endobj 520 0 obj <> stream xMOKAgr02 II]L C'axJH } _lf*5eyg7 s\v[ɜ3ʱ,q1 X9!`\- a:\}Ur/^K$QZ(4DQ"RIHRVFHZ:;ܐZ-t i)he]#y6nkrR$G+*}Fz^1Du |;?"'MWxwG/-CPl` Piޚ=c6Bs Oéf)"ҏ{fHlY!=ZH endstream endobj 521 0 obj <> stream x]QO0}1 c%d$<05rLQHN] gGi0I2êZ%=jeqZ-n/Ym_fX.OJH;`b4ν&UU˂b~{l)=܌.Af(Hv#4 s0D;Nɶ+$B a,HZʫKWШԎF!RB#qtD>RBJcO6D\$,(ɽมCrOPpjcSQTOծC=2Bz$_Pɥ~DL4P8|<'28p8^8U .صݘ7K endstream endobj 522 0 obj <> stream x]Mo0 ;M$/ !QMդR0(CzX$:ff롮`fm p5 \;eq hf^>OsiYI5maٞO%Y-)O0|(4CPbXOM xZtSW9Tq+I0'k4TWbgY e%߹(R/rd%HOCyDREHR)3YJ@" %KHX{>yHq>*yD|R!U}F" {dɈ2Grə "{!t&: Yh l}\1tMe p4qY> stream xڝzwXT 9[G83z5슽wD3P +mEtcX4ƚ$u̞{3kV߿ P֨xG_43Y𜐷6CLi梓X`0D,& B{̇ĊO {[H?B ^`kgcz^"k*ˆdD9{kGP臯 a _2l:aNpH/vmOW(p`e_xxH_焭rI~FNwG$0PZ=f^sAoXGWO`!``J0P  `" M"\[(ZaDh) JLv6c$ ACp4GMEl4CBRQډQDYh-6! !L>5q69).*7MLL_S_Peѧ#=h033 zz^쵸W]_ Cg SNߘ|iY8X0]%$, 3 w +{VG t=e5Q\U%vdd rt~/\=oN_xE z?|S̟!s *ƾ|m39@4<.Co|ׇKF`ޙ&I蕦.Ѡپ8|7h^ErdG-gS t͌){j(1Nh_ OgT6Boް o"8c?0']eJ`+`C2%`yʉf@_4g'OB} [V*08^?hCWph:~qD戒zUVED6rL XU Crf>g-Mr-u[@zvSk z/ubX~Ya!ױyr:Z#(*z{ *AR/mhͬ"Pert __||[<4@7m 'e~Ev++816K3 "~\$1=w2Pw~xlc/w4rⰤ.YKFF&O [$L̯O65~<ĬW;E +'>PM\*nڳݝRQB)l7{3?hU57/8QWA/Ө3O[ =dCf.H~,ۉVkW_?_iVM`SX>?@i `CDS۵+cZI=GΗ(k`bB@$?k:u~8XC4_K'u2i%Qo0Zb gCh,XYSXC6ZUJ/jus,䒪g !T У:ncE R s|PRY3r4-,Abd?'9T^UU֑D mUv2gnGj (C~;kSobRulۦto#$ê|+#3tP0! HD6'$"=){K')HBu#fח. 
NP?U\|UdjMKHMvKIH-Qݷ݃55%d9)J d hujBv'5MeS'xt['s?-]|Tmkm,-t>1;/2[6m5l*}Lh=u-J3;s(leh5}Y\'',!thi|Y蠶H]doB;5ژO ap+^-N*XEg9D\!ץ%bjfݍUt `סiH@I%UA gdG9%$ޔ7 ^/>Sagxr-Xs%雮D)?y_z+!`,l⭾YyPx=Cՠ!JW(0`ދTp Z¥ь 1j%$+vO7kHB-a`%%Y^Ł(9\Z]JB@u916:ɖX%JN̿AO!߼'ak߮ijv҄m$Fq(E8p//uSlp! nbdƝUzC95fF\ B͕52\^.E7k7IWN[>#$]xf" WY4xqZ Z]HgƣO$U&IʢU_~7p&i#,iUb\9d=HxJEfn<\QfaV37٩~V*u8ЛHY83߻Ut둧`e||k{ T+`N*z/#[+e3Mx_o<3c;LϤ @(a֩՟:N343J yK¾ / CL[ǒh^)JYg̖@zokΨ=xL&ur*vbo"u:x%+~<ГbBQO*-4tx1Me)1;S8{!ڡI˗g7:@K8cMe;ɺI,lfL,_Cc1 1f %;8\`Dw`* U'-n$ V˕"WR$vv[Lnou[2Xr&{^!6J<̉`Np"8(xEvz$[>0bn(]EEq1ӁDP1cC|8dkFJ װe%/q!7<P^jt( aw~MqN!zJ.$eHnK ,i 'pk+^HO4L]=:vƟ@5i mI j+RTiX|&8c )(_Z2Ez@F屮bxb9f}Y5NQ/ U9[Šfx$pQd 5v qDKfCLh&*2^N bt)xq˿] di43 On6l>#_@ę ϣieԿxѮUߨ*6*xJS?ᮐpBzR~?^%g뾾 جaOܦB7̓9-Rnݓ6̐O547 _wuZ񬧨@ȚqFV^ѷT- <8-ߔ'OV~$*Z;hb47\+߮czX{vIo2:gd\C v\+nRp !TSZTh]jԇ.CA%&ܟE%D}$]u@I ݸ5"$PQlڝ:İnC-nBọDĘGuz/pId1Kh L?rKFI/smT 蹝5Qkh{wkTBC\v<ǵ 7)!ȟؘ* ; 6X!!o-~x;߂-HA1lC6&c`)vbXG~F2AqU" {G,B%<'KD|(% KV!1ϙ?71s1/Gψ@I+jxScu67{tMUIczg`*)1 `cZgcgĒkXe"`+s`?Z<_]?2AZWH.f-"ˆ d՛:`S۫xS9|ҝ@DO'OqoQ n rO-Fm7?>oU!<!Z֐񭠗GavN ~o05vAle"u1C7.=8ʟh?Fj7oyH ̷0pbwΊ#Ma+u ڋK+箙Ey̋L۰VYh|xdh)q,*aAG!|0Zzlb$n| +iu|vo`6k"܃ ds;q&À.Az)R`0=A:Ͻ}s a'koIϸ08_z.:IGtl \5O,_tƖ,z͡GHvg/HjJBwU^  N=?="q͂@F/x 9L@xݛ'c1 (| ~z;~]JNl|`@0&[kt|2lVTACJ^o{% endstream endobj 526 0 obj <> stream xڛ ܀`qy `9, PK endstream endobj 528 0 obj <> stream xmYXT׶a8gˆssllb,ATE, V6tC2 E V7  1FFI41ћhyۃ~߃}fkA*15HR07ufz{a!ősA$"6q8F&5]`.;an2ô&fD"J_Mаhv 2H%r '%6/%H^KeoW 찳i7ҰPMWOH&W+28&-@mEwFkƅ%!jMdW7D,  U#->wDhlf̘9ns|udXo^& ,2&@ D{m54gkո8H3IL% ddiR2LXIk[ƔHeRS)#eH:X:Dj.ʥCä )'*åoIG5ُơh&C=2Qe hf~-,05-lFvticfڳ7]fcB̞ rT0xԛ34d1|<{rˑ-?6cXbFr\j(+?RJOe򉵋upfWYp $'OlW[?WZqqIuY|b :W1!8,A w`;ݓ^~KI^\Y؈1UU>p-kU^UJ}M1u~AXss^(XE.[$BnGMqJW+ŭHg>` B'?pm]D#1, 60A+o_B!}J vu7> +Xa`-Y`fv.'T?')˵%) a,hޭەݻ!b\R<-/Us1 iY`8ţc`UpLV¤y)<p+sLj< jO2nMd1'dd4RfHNt`.XC-)늈U%O]LI|h/K7+}e\p7%)+ C۟QF~)'m{0w_ζ}˃ N6| kHH y.k2]gS 7dH#DO*~hsǢ⮁ HQkKW+c\ [ OdiZļ2mWĽ AfL8SXr|"-|GFBf &, ʌbUk˞*~5:_ԀAghd⒢g J&dt%EpO(/5b%ԈQ쟙m¾ELzEje"љwpĄ4?t6f&*PUY~K2+,3X1 
f-9gYN//a!0> ˳1GzEҚ{&Vq s_WJ2)Ҷ}i[q إ<pgkӊҶZղ4lgxUU"O M0D&. >~1a0H/\Ïoڊ+zېidw@7v`yV]W}`$L8DO`A`@R\g'-F::!n`v9A~O`񇭈LhdM&,T?'%Z$V+\xb;]:_>n hxlqG ŠyG0~n}„,;אac"2;0 OtvE[R^mҧ`!z4wG] 4|BO)KzX4[{Sgl3uwa>Y(#J| ƽ|?nEEf ,g n_Jb#1Iȫ)=PYPu -{8N[]Vw17cقm8ql!M1UiBlo WQՇ7H{9* l8JziPu|t׼AX~r$}ҟjRğ(Ote1C]5^kYpnK&34ᅻKj+3CrOuTfǐڎ9`2m̊[n$S$RN#Q)=.z$XXˠ2E { md*a?γ G8*LRϯj} #^,I޿ ßi -. .peZWxnД΃ų_);13}446ҴjkJE#C#2>tB_hD{DBcǾ\\Dsxb!Dn˚X^ _ .:1>D~n~O5iE&wo6 a{k pS$vq|k|wn t:eU۫.w-?X2( m%GT5U=CT \eTc͏ p'.4o^Oзۯ{"OcG ΏP=xvZZyD%jx Oy=,ҎFAޒpULmsw v\ M*ڼ q)J㳎O޹,r0iNG1 @da-2]1d`uDR m jD.Z"QnQ;"F\WOp!`ͅC7`jTw3ąTm;qΤl|AL􆺳0@UZ|CГ)K^VaAȝiآȀ00-Si$Vtb2 L}uyI J31N' >k= ɅŪ} w @MzG82Q&jxXG){ KOi˃NgNMS%xQЪ! &9Zp0,fڋJS> lHߑQGiؖT08z&$NyGPOv&vp)]?K6ĄUaAT|݂|ZNT fK\̚p/*A=,TaF\}tgyU?XRibMLG Yʶӎ;:aiRiEIIT _[_qt[mj!cK ]?]ah?8D6ޖ71jQcY'&55Mg 6a o\dGvoCaҔug-~QzѸTܝ(= Ӕg7zz`)z~*D71dc "/d!&m~Ghgsuܸs/cU}>kw&>8>"̿.Gye EXiupK8GD) [8@xD!M gV>?}._DvJ*+~{qտϖ>gICYl0!CU7-C7!aaͱ#UpSP3qP/) 18bTcSH% OrK^#, {( _7t_nrC{nrwi i<:.sրYږ\=p,,33ԥvE ;:.y>& k#"n݈㱖Uw΍&Nub@'n tPn=XApݱ!Ebc&'ܓˍ}UZgc b@{ ZFu+甘\XP{9\dk> stream xڛ ܀}x8lkK endstream endobj 532 0 obj <> stream x]X TvQ5h7p_"""æϤL hT"e3" :1F_w&9y]soWnQDQtMHfN2 ['QT/PH KM.v ނ ޕ_?;7:R~F'œLE1e Vzx 4cwB\²bLFSԲ(c9yeJB\2[#@~ncf(!11k^W'QI&ĘXsRBt1xФA;Ffp`/tU&8 NN.ȂC)\ނAp A`C.x ɂ# ~ L 4a0C)`a"B0a0O/ H * :τ3B8V\.Oė:wJV]bl,uBRZ^Vk\544 !o,~~Gt9D9\;mo2mk~]nZ 65qYwegUQqܹsS^.7ё. 
M2"ukj6F.n+zr)j)dmmv鿀/6o믲>φ رL0gE/M;33f Swڷo}ii-3?Ÿ B?1H8e]Zf|/&K0d y^L239}P` k;@E{(Y7X#H腽"׹Qf,{+X9i"SQ@>9dQ콫)cpX+ݘߊogFY phQ=g"&V(9">.d~w5 gsoB+NrrIY|5p7_Tk3[8Va\<$LqIأascLھt vmW-i3,=0"~{W6Sfn㙎`+ЭuWGs3;"0V\gw ,ŎWY\l^RqI^~q9?ƠKϥ[6qE3qS}QWzH{q9A8$TGHegB3 'L4X3O4n}a@uS'j@kFopf09&(=}HEGN 0,u@=}yۈ 0k ^ mn|~~p~V!UC~}9٩8Bc[U4C1}N~>Xm LãlL+y3Z3,"j[3oUl3۾R_:C= 3!<;8 *î%}?F='0a2K1W-⭻wOwlf'/9+(uW#%7BE]a=p :<{>嶷U THCE \#;SH2u6w ,BI0Tt۸f9/ֵӄ0'=/h'^AExד\묫C';-#}0mɒn<ʓCCswEΔs3|@O"*3rDA쉓e E "nAA<(K(H^i/t~%+ {=2^;SlMӥ[r-\оA|E{z:]L{_7ܛz'7]3:gR{8Ig30Obhc2ě^dIIZTr4/Pl>dHPj4}</zUem>>  %ǯ^n>Vk1֕tXs)(=F"KI y"{4c>o@;C%Z]KN_f3W&xV2e#oj|]R g>\7(Zު~lAшӈ7O k3gP{`+Kl#GfX`8\,5礐o86=[q$:z^m 7*el/ױ$TkR^.\雰Y(9;A ) $E.ddd=FC>[:߬3e u9pKѫB`4U TITBU (FjqN'J0Gtyݥ^8N!U7꿃 Ң#vq%,ނw+s+jn Kս̠[d=90_ނKDbbLJZ,&sI]{&enPR1~W s8?ˮzOUr w];|iy,mrEcE/8tp§tI-mUmkeОjl];v0'lVyQWSfsk;;Ԏ-K0%!!'87P]el?fT_Z6oQP + r* ( >v@H_]It%_Yy 1=Hfl wc5POjEL_lK٧i+T{\ y?w"/KI @Ě43ث{5Zi0-WZuδ;%EDwz)frW sHCH.XJuWڱ OPObTѕoh1ubcE~ccncxd|X41z |{T 5bGu4DcoS1cc]*>!81+TeT}" Qɀjҧw;(rp\9?K+tP+9pWC5=pcgB: ]]YR|ټ@ ~rO[H:$2vQZK.<δo!¹w=*tdLc:AFޛR$]sV [dQwl-I:/jyXT@gKK~s}/ CV@ydRR-E'0:QLF;yG+v;5"[UtAyɜdrҷ5l*+ JQs^[uw=N=" \/$_z7p*g\nI6n>dQn4Ktd-!g<,oM*1ll3nesfOxjAMԹ,UyBwg쇩؏6 q~.-i1?þd-䀆`fƖR%LXAvKhu^1 Dyksl_P dzU~FI,9Yyx/M㢍W4*u{_6vi)Yޫ Qko䢭>z` o|I u/- d-e*źѷQ{Rvy;gA^Is2q@fq%djVgќ,t*",1 j[s}ա~q endstream endobj 533 0 obj <> stream x6,!*= endstream endobj 535 0 obj <> stream xڍXI/8T*2mNcYTqU0c Arf̀H9' "#*V0縺{kjWqNNӧP%]d: k1kvX{iCa@.'pZ/j `m]qw{îTC]---ю^nuz&gGPuiDz293}[;7Gc'L1f@k7ہ?ĸ^v}m\\`7p7x9X`{h=pNG7;;/ocb3vNǏ0f~U^N\_w/>>3v:8kJz kSJw/Wk-xZvmCڜPF#GiVh+jjҮ> P_GrĢh0ah8Fh2EZh%2Gkzr)*GeQgx'z>?MTB}3%+W_^B}[}~~~~~~~~~l}}/NߏtkH,OIZeZ6>wͣ[BQ-$5q0mW#5M=h,DI*$tF8>S{P=tB/;um{">ﵩםz?JS.HOK>s3+sŀ7(kԷqg?,,?gIlp5i٠ ~8d̡Sv =pcG$ 0J;2n᨝G=M}o2ƮcƏ;rl>?k O5$ɆN$&7_^>I^cJ+%B)Fz%((쭾dJ@S:a䦑̰.¼ QW\hwXl [m'2R{_gC5yT}W8yF I n\'Ŕ `ٶ&rMi PiU¡a:\%SLxhJ9srNa =]fUƔw*e`ؾk }IIsM;u#%\y*ԣRG# c]%t;ؗSK})Yk_yX. 
^6UlxwUq)Or@䵩-]crA@3U62$KflFC(HeADD_'/d3}m}sK*4vTDDaى>\ z0W?&ͻpF){=1J2T b *78Er:-uwr>T_B)v1|`Q8C,%WŷJїsb=o|(bJQCq0 SqN@B2m0AC1\%=&)@OÍNJ֓F\&!Ny&3o*e/;I5(6$يO=vT6]sSЀ )4Up=";&p2x)XZaFfmcf6#=lsi1잋SR8f^8&!'ɁlQR)pùK=!^ 0q}BXS P>ǹ/0h(0 \{*q|˜ˍ#G#"CN@iLQ>*[O_ ̩YZ\jƂux?\ɪ#W-ޗog;Jd=C|n/a87`ŧfZRT.VllO|SvaRv^_ȴYh9/`db;Q5Dc㍌UZiy6!L2.V .s֗Yn[lz)}:Hjt넱<pcG2g_N 'LBI]r;qǻxU, u {Xf[D[g0ٶxN n)a97=SIl(t$Y)RFI ='! xP!EF+7esfai5yѾx*F1ؿQ@WLS6n6tw _ g@|3Aξْ›7$ROU!38TYa8BXB(Va,ɞ ° IuI$Vu.)wq^uB]\ $!C52+ Q iMAO p.Tgp\;Yr~L18/R Z yXGqP.y'KΗ6&036xZ߉[5sa0tx)r~/,9B&ۘsA6X>40Z= ,:/W%EdԶv_%I|[^`;vJ^YF"\> f@!mL`{a9/ {#ůn344UlcSwȥAXPPO?M`9 *aGuN\D6&~>x\;y'K.6X&)Rz#0zZ_&d2up䝘Hp>=/մ\MO/IqL"\ Fjx0l8/˱@=[=^mɒ+"d &ObaF@ ,Eg}T]j#ZB&@`O^~ +5:IʸfUN0YreLЍEDtUs#N'&0!pUd4#:]Gd y?2vA}sDs$t :4^z԰ˣ2 {)sTGxדԿĿDa?D0sbD8" {ՠnAJۨWe5;{oK?= }:X͆v>}r5M՟ޖدrVo^\,l_=`[RO0;/bit7iyPGAV0SM=D0/wU;6,"=,8'`Fuͦ\L3 4 @$ ϊ3c<>pݬGsRC{ܰǍ/uzK9a džbjs8w\ؽ!׫%Jsթ)5DӚ T+ɳQ? L8/'άǁFzI 1ӓ~n? W.!w1.y,!Jŝ)N !嶰 *5js{$AqN I'rwÚ֯oApRhLФ N3#;&$=5{W$r%sW\lXfIzgnyf?O`Mc빸. ϊw0VIz^!4]]87)zqo7 8L$+#탇_s3 C џ*oJPU#.D*GP([[f [s6fm)Rxr%IB*JVJ` gW7Qp}_B _I!o+HMuR5X}A{(YHDJ`2?CjԷt(ܰ6Qv[ay:kη ,q#`D =%W kO!Eϗf]u%7rTK΂6aWxf7μ9ʏra##O[[3Wg'l|jӹs7Zbj4}ur%֮6[s x9WᮨRh \\-EaRE)p z% G6\hx9}cvןh=CSzȝ_j^aSg]]sjW-npj{-S9pbx^"O~i4󞤙@4A wKpA“hD0js쉖(T[WI/1C S,Uz::f{-M3⯩'&Dl_)8}^L("H͠Q2uO^K:s+DYZ,4"VOPE$ ~ xsȀBگL- YFu^,N$gHH#fA+n^$B#fj7AKtB#6LZZL&pu19ǥrź a,}ɻ{(|+4wp582Z*X~/z8K&geI QR^͗TGUp،_I›ZyiA6-*dU#w $0{fN6&-Нs;XR"ԤS`8Om/rϲ2K}xp F(e5_kOE5̗#^HbڅI?IB > #ɜiZG+P%:9izLk u2ia^7֖s؄)j+OUCBg!a &@ $Nj} q+{W.sQ\^rl.K{6cV;:`a!)+i'deX7z9A7ujz _A Lb1u+kD$N%0)X@@~z&ZjN(dR}1X0o Jg!o4N %,iv>pNNMO 4S55B01pfo@GQ 0fMV[7핲˟TZuXUEj 'C.y+G*4 prfCv'%gy6EfW` ?/ sH2.k1I ] e,lmeW|$ Zcix27|4vECocN;JTxs"x^[)uDZ5^#d@,Qy^ w"Vp|-zK?ԛIFх0k?_d5:c'#t}Z)>&䳨?ȱ $'_˺z)5,O=LIAWUE["^-;,MwO h?C=aD7hgFu &HG.>kX^U0?CUyEi[\"(/TSx<")) aF߲ #COhaA%pvzY Xt-H QExhހ#]3Znߨ-N$=RRSIp@ OO„k? 
q7O6PG_W M m/ɑ $i =ðÂ7S9?LX1aBn\ZA?&aB LDm{%tŷp'Vĭ!`X(g 2~yN:R$|!C,iu:+Κlv_$Pz# 4YQ4I (2 Z ch{VF)v3M{LR)4LgJi?s?7BOr̗|/dKrqi,v&wz1H <éR!W~b1{Es"| t 6ksL/AnlpԁĪO9[ 6r ]t% LnflzА=$k )pha:4XʷW\=zSd;#2p3Nb|pw$w4eS|scnepHn/f_|__ىI{<zp u[3yGhV ]3qM yOC&̸~.*̃ XÍ 틑7NK|[[WHZҵ@&Z~cu>OBݯxvy,fNMZ os ݶoR*f'$+ mFϔ)2c-zkKN(dh_yklܴbDҴEֳV^-* ;͝h/~uhA<:ҥ z&:s^7݂ö0@Kk_"9窇1":)c,h )c}x3߾xJT,( #׃G_Byc_p>?sxJSEÏr-lz q?4y ; 'MUb=jm,݄Rb^ /`R,OwĶ:FFDwKy²ӌtHݹIot}ͳX_/ Y)z: ާˠ fksS5\p&\l:FO8~L%љʆd~o:/E8uرe.j:i$Z\g3bӣubZ1Qq㶒( @͇;(ڣ+NtcuL[O-_WqOSmg]sK7Ԃvya :yQQ ^0ALq cq'ގWC`5,rO.氶5&2̏i66aPͨaT` Nzi]aZː246QxwX  ehodlWa6܆L lc8j8˜CyhuU,,0slKVބl$G䠝zdOhnD.`xS~ 0qzae%Jh@ ?M{]fW-vݴ Xkx06 c3Pu I y~[c!9]uKb2|~zyCXPw_|_^_'HOWNݒ?\p~E[pRXN|x$Z J@tr@!.~Œ wm!~C7KS70Ma.I0p9>s*O$bUa x N`Ґsx(vc5x4u@0%ɥC]*so ޙ?gO^V^vT';(}|}8f?S=ao@d6$^*E$>|0%R0P}URn$4'0$p-}sH [˺ۻ ][ H۷c Ԡ{Sr%H؛:VO`weW5Ƈ? W3tqX^ bPȳC5 n]_x( X:nbs4K PzM[$ՆZ,|Ӷ4T} Vx@M 7.9w]Ҩ(eTp)/Cs^/ށ ؝Xo Q@\︘]Q#Ҵa0o >!HX\h'O-#<Fƒ~"%C|vs0s]h[} ʟYb3Kġ'&^npXBlUvJ+o c֯ЋL$IB*S.e'*ɼȭP(]Iκs'4 vgCgs|!8YhccS{:XQ5JL[ 'աxFYw:`1p|&Cؑ;CNDAoUmFAMHv[dô a#!3.,Q&S :4g?ny]ݏ(pa=xX~?\]S_»=.4/Tؘ A#>^~-I x$wp`!癋;]I)$va?'~>~sm3يb+!qxB;B7_dcqiIQnzzGUDۂΙ/0&[G:^XKt?V|H' @Qx9سĺ8u;4up46cH&f0 ; [0CK8%J4OI\ 8< ~&CEn\jeB2 k~as?)1Vlii1.kp/dͷ'y:<LG QnÀpf UCtdžnv{D-orc4ޫ/KЩel. ћ|CXϯ.u>n|BRFFJNnz*fƊ`:?:<`&㾸'+ 6< e0.(  ֎7[6OHE%-9-V;sE~ =z`{'FN-T*$wv ϼC~ F#:̐]ӭHEZ(C_a VI"O-Q.Ur\nQ> =W.}N|7 f)2a}KWJ`86tCʈ]T"RVbw< eGWy45/|}+N"咜e~=} mPtSEB>C  uf ħ󱉱 rݷ-2XFnFaYGgTmr< ڃFs!Ѕnrmƹ6KbT[ǞH:wD>@"~ZC_k̶ӝkNp(_d>x2,O$EM|$zCUY#'<][8t43f\h}%g{ʥ_4v`jts 1vܸfF\q?i1tt V`ڽO*a0Z9@]uD4s ~t{ o QqwNu'?QrR)PFrŠQ2)ۅ5$%۷ILg+xjGH/(c YD ٰ{d1MMٍhF "/rНFFn VM*Й{FXoj,ú<hLLtgLCߏ>Q<tua,STSǦ 8rRbvP=A-6%y1b+2?\S>Esh:+4?~ET?b ZN~AR- y~KttIy٬rE|ntWeO.suwy$7TmџVI-T`)c3ȸE=5{u. 
T͓O-RZ82nor%D&UᾜT~mrv Ӿnz_ LH07LB2O){$|o qթ8GXLV8TӺ5Խ^*{u*]̏hk]|aM 'Xݐ߄iˢ̨u^|F"H]ᅩ\"N@/ DOp^īJF 06刢,08p>E7 UD=x딄~w,+OQt39a Y-.*n)5y`H-hii YPY{;-=+):¡=z> stream xk`!h@b3228`WuYL@fb`̔F>fF"U;1 VQT7ЧˀBMIiMBA, C 8d4H endstream endobj 539 0 obj <> stream xeXgxSҖ{d#@cBhC=b@ll.[Vdpw w"F7&1ŐBI蝐䒄9 ܛ\У;; zB5ׄ+DhF/xo㗗ppm. N!X!"↊,.f\z9Q  ~ %]%.S.NBˀA?m2Dv v~$y|4*Oo`OUMmqj|A>4*mA[{!nCԣȖmc>UU4nǍhq?67TmVT4P?טm`7TE#|;N[ZPIG,+pHR@L*`൐Oz/H[P,tJR!-( !|[8WVXB2@[-݋|J*\/kyM"~{7aZH7;Mp*t>}~rWڷEf<ɮ~~؀ڥKHZC7ѝ{#{gXu?']: IG Bxޓ:#QVпTF]Bo"_*4*]640@JZsKO-o) ۿ`/BF˜NŃb0QEFUXUx,owbY (l&ZsC'1@ 0n/؛9Z ]T>rCEHkے"Xp.<6%؆Á(j)MetzVRbc[T;"E=ͷN1U RSꈱ E} 9XVAb{t@9LA]]Nied?Ֆ @%6ow`ryUpMqiu I9~@ N23;͠oP#pMuPrHj h{A .} TZ_X5p*Wenf3)UGS1AzSBuk K߀,`0))[eSaC=އv> J;D@yX6J)oMǘg0<|?)~?)зy%(QzNtt3?)C+$kOhlޝ fspω{nrxSwd>cosCM{fiy7oXoڳ}! ⬢jT^=;ݾ7S4# yϖ3J><+I!Jl;\i]Oz3vZ4A2e`pk{Jfn5M33:S=a"ͲRFd :)x!O桹&q ,ϋs&o]Ng>Օ(,60N 3B@F Vu3(ά>v[PlfA|!0՘܏q}lȺJ$;M6H5]ԣ"exKЙ-$j}j~"> Jhͺ>'FigVy^ß8(fuVƆm"~`~`OZ[z.5b4`.xJ: ($3ɱq-8f vc%}j] ,ևK/TT߃ltIVlelA#W:/5=.iJ; :bV1CjN,; ZM~;=dv{'`۝\rk$WNaF 'SIy $9D$x7IUt7'e%IgD}D*,W栮ZjY{AĴS#h\9jrOCf9]u_=y >{4~).~-]SV/}*:u)++?î | 2 _[3%V]&v%އCu0\!r_ o&XoC12xŝm+;0(7s kgx[bRq?NMw~Ll91MC(}uhWmOV-1Q^X )%lkW3 DҪ.]-m5GTe3-G:vdT2{J~C]㷙C,qg@ _dʲGOWzlэ~_v#Y` > E3SeY!XN"-V&$fZ#v)sMjmbrɶfGJ}0>£bgL<.7FZ{=}HsgY{+0B<_#armnd~PP|޽b9v#X6GVyCBZ6ޥ[aEq6MHyPBéZ>Ѐ@&2&%7'Ƃ 0?1ۑb66E 9&⩽ fqo>ҭXҾ'z^ٱ4bŌ\V߅ME"X4Mޛ E۫^ɶd9ɀ_ͥ_Ix b˾1&¢h2^@`-X5ke0h'9JmA螽)MTY]R)*Ww#l\iRK6[u>5V2afoo'qCzTy3oaޣYWF{~K;`$ :-/-,?ρk+›$6]Sx2X8>9עgct6oVyH^DU'WV>W,ًB7m `_֖V(Ly[[:}e X)꒷ / p> ]iP LDY6֜#JM-כ6`Gxl/Nj/8_[HDs &S|MtnSu0]̿4Jb*lDbFy=Bw| c_xB:RoT3Ԝܔ { 8^nV*%敧# f@_U̱&8 @+p%Q_{ak #cm>I*|{\V50&ZTcflcueu׉"HūPy^ް3dfFsn:C^b~Y!o)ܯ}P';ĜLqxjw0EgN<szT7VHhcxE. 
7_{&8Sgͬ__///ph9,1'(>䖷 V3df6(\I&Ud/;fw _\Sih$71?1-'D?6eG!)o:q8-9Ko7vH[r":U.+o 0Lj@ h 6,N_]or3˗܉)%eyeQ1R5g&T #@Bɉf-\nj۲v~-k^V>PyaءQ q%Q1sH) ńo*yoәBs|fV^Z=m'* ʈ Tq۩;+*6 *SHR|!,{@&~>a\L= KL<[k4tP]|o7fS+l ~B!"pݙW氱& 0g1!+#!+EDmp 7G}~0R]AS]P&o2ؠ֙GWN2zbXq0 X tRSF$NHdj-)lv?t`A{sx.r%; Vu0_XFtƙPE=(/?))έ-7[~Kq2B:N& cDݮS؊>cQưpFei5v5UIpD',SJvơ[ݘF*>E?KġoˆϜU{&V+7U{+/'j~f.ZG'qBFFBӑuC:ێ_!O4[k4bҫ1 A-7PBu#K_'By>)B'e^$KA.EdkeY$9e`@.P,,/,yK3|q &I endstream endobj 541 0 obj <> stream xk0=@`j`l8v endstream endobj 543 0 obj <> stream x=V{XW$1" fK_E*/Ak  VPP%K FAJSDDU 9wwܹ@,`&[5VS e.'srbB} މ=像9fttqz9-  #1U-ՆhbFMI6gi*ZV IiFml\bOOoEXFİEө zSk+Tz"Lڍ*ʩSA͊$bQbg#>*ѠMVI*0kSdg_hЧ(5 NF&S ň?A MpJpZP+x"x!E;"'2M a*,tJ^P~r!/QX(z SBNFd?M#$|\K֐exfx}n0cYcuKK U,<wsD=ߠ6ܕCiu%i[a:i<^Of董YVyi>[~hSۿDڜa6Yssr3fVv<;~wL=#q@#!xJM=1xe,w}"x9F{Htq0Y[ye%'nT%-f1DA{ hN;^׆+cdOs:n=%& +It JͶ G;P:CBvw%뀽Dq|hXɹtj d ( dȚgP^Vv20晸WOxS.:&sq~(8՞ 2DC%aFqH{dz_c*Ag u/krT=>| ?]nCP/Bn}Ԗ}wrABfF!W @2(b鏢PH$ ?t ++˜[ysNJ?d06&QN@SWI. v]OІ*j<9 q"6A NCv/ 5>ޡ1(]~ 'zEpIi܌@ D[S-??Lm|]8vW L @O?VZ{WTNad*1M遰vhpBv):DCn64nE% ydK}vCy\VK"/eH<ܺ\x͉(Nmڕ{Cvi_ėu6b;Redsy5gWJ#O$Bsм~)U$uouP]WmS)[} ZRFVlkM\"ߙݮUylQY]_DCfAJYʔ0T)ϧD.[a9YUqD>!7 ؎]{ q[A'rH4 >{Iӈ'D3V1s5>"pht64c,de1(yrvmp;CDwCPwGLorIQf$_Xx6zK]lsDܮCx&;𴭆ʛVzݟX[w#쮼S pMw~5aQ 3+7̝q@UʦNVVȷhK"˵hU*5 |8I+嵕יK%fCC1Y.C|xAFOtP x Xdc||80עL(?ּpzRϝLZ|ڛ@h'P?.C99Θ",oOvXlc;E;I;H ی처O wxkgXh'G W ?Μ<++K=օS1 ;Q="x^X D"o8cs_\.J~5_M-A"'_[^WYt:CwA^~ X~*5n[GHglJed"7 .d8}K3ܼ#%rYȗ$;:./",oe򤳝,' mE߃J9~guZƚZc س3EeIEV@WD|"2;^2ϕcyrۚ(,@ 9$oh}(AQrE:jtmRC:k0 endstream endobj 545 0 obj <> stream x;L , `"\F82*@ : ƁAI@9$v endstream endobj 482 0 obj <> stream xڽZmo7~.F`!8 iqs$CR3$eKkm"9KZ. 
@0n``@I<0|_CP]؛yGT՜u8f\MD*[V4ƺF!#]H3ү*D_t#v]25a^K\<6Cľ67*\V 2c´5#= MbӋ!Df@YnxӀCؘkzTs= SpĨ?՜jE[s.ʹn˹]KU'iEV cZmlݦJW1nw٧FzXUn=v'=ƙϻwwݻwZ;O:\˕rZ# kvA`^gϺOM}xȦElx=^yY<(ھGgҸ)=,یy7yѽLы?ZUDˌ2rvQ6Z9 U[귶۩`os6ZmSNLW;1A{M,WRD2_iȳ]ʔwU#iLֶ-K(:SHY+gIYDN\Z!t9FsȒ2"9rfX<_CO9C9X-n0FmX aPD7SbKc@FqQWك'Sa9?r-8h-A' xΦe!!cZMJ9VINa<:eH0r+BNNf/a7.JYQy X,6V`$¢>5iE|}3S!WE6s]G 4}j# ^5ڮ9W(~p G/A A9`ޔBMLk0ZUmyr6rmy `w% íV2`2<`a )w,Br-bpmfpmaW2m %!h / ЄRR X^G#L W׭ `XPhs3AUrg7SX- &g!Lx*Q[_$ F6-_ ǒ($L-!DTe2g@8:b)l%[2``*b]z⢬k41Gc؉ Qc zY$QA PrTG @=\ X7G!ֵBe#6]!p E;<'Q"3Wq7ceDx C΀G'{ ꭏX%YP Ͷʟ`+1ze#O˳УGEOӿgݧx.|^,'w,gvn>^;9a|.w |(iXmm;ř FR+ClFo/*诋s,.A2C[vtzrv4=_,ϗty5=aM/!(mL.;a)oi=-_jg?qlvw hc1-ew:y:A.g9_Pd;g}7y}<]҄>iҷPdM}X,FR5k:mmʉĎ&ݟ7^}|d>~>ξc)'63LI֘]e pCisM]#aFʔۉp'Q@[# &Ix ߗ 6Ayߎ~Z1x{r o+Ji=lD 'W\B9R-GUܣ \gdL؃1#K%|u]4"S9k/s l\h׿=yvɋxU( {+\7>q|?HOS#=Oz4q hov8AL=_C1݀勧/v|V|xXm@tI46O۴g:y_6bVY@v=9{9nɁ 7}4hh65D V^ Vd_^Gy}3.f|)}݉/{Bέ̀!K#omTSح&O<&{m/ endstream endobj 548 0 obj <<45e8142e3feb28b82cc2f9bec68e6529>]/Root 1 0 R/Info 2 0 R/Size 549/W[1 3 2]/Filter/FlateDecode/Length 1234>> stream x5WLg珊' EA!@d jQ(Zڡv`VuT4WiNzӤI3|7;>Y N Ll<cfHcD"\ycA7,e4qAwp8@p4q8=K{T7MO@E?1Y<'aNk5bMGN_cct1-L p1i-gahsq\pƋ)(;YK0SԄٳR9KRLt,l\S{^mWӕXEw#+K J1wX5bǬ]Kp57p-7܈-܌ui{n߷#o6 wc6lokm6i74myp6-4 Mޫ&T59D3܆g_=«b~ec ss-9I־zW牸v"mA@;64Y^xLya'Cp00y]0w/1 7N!=r#xu>@nBkX7 ?X[ /Y PKp:꺺Ia8 p6,:Q?!CcPgX\:]~NY9:K a:f`&&J\:$Z\p#Y_}P7oПyIbޣXX˱uC.Us[Z N·f8D/zL'GA4OSQzy}Պ-a#xq 1], 9) op <- options(warn = -1) # hide warnings from fitdistr() ################################################### ### code chunk number 37: modeling.Rnw:621-623 ################################################### library(MASS) fitdistr(y, f, start = list(shape = 2, rate = 0.5)) ################################################### ### code chunk number 38: modeling.Rnw:625-626 ################################################### options(op) # restore warnings actuar/inst/doc/risk.pdf0000644000176200001440000076036515151412451014736 0ustar liggesusers%PDF-1.5 % 23 0 obj <> 
stream xڭYɎ6+fXM d䖠oAcSR$d݂m,j4! o ]/p=|Q+oʆq5X :hI_G4F;Dρ.^o_ɫT$SK+~s< |\ުϏ_i#&B[G"v3c;I\P9L;`A9GdUn0Ž TU|bW'aQ}m (Qw9C!r*7iќD9Eyb EA,&.,bnb$#.Qe #*O@)-[lbZ0IȆ&l6zeF8S 33oh_qEtƳJO5/6+W ڂ&5GqX¾U`gT~Eb&ڗ|5ObutviwjyVgnoʫ\7xMn^Q2k?%!1GWp [(IP 3&Ee'*ĉ1iLgXUAT[&| H+Ac,{a辺f_aK0j;.XnDQq@YYݎF2ټ厞bskapN.顭]n( do]{#6Mcs'·,km/=(#6mXvܚIU7kc21STHĪ{Ċ|j6# UEu2zC3QK(\iw3a]j8ez@[ZcSX9`ybBm%<^ulEr={UypڲHYM8<̷\ Pi[ :|GFwZz:`ؙ-`uԣW5ԤT@>2䣄e}v4ď$Ssx5*n#?B7^ ЙaōWsYۊ5(92]q'tfgod2%tL-!U3p} Sk K!de`(m {#8-Jc}pjW~R+8GQWV}z`0=j<[vϽU 0[:q"!bQCԢnCwYw6t.97aQaM$-A0d9 hNrTН[||Yt۶LM:}|r1?xA_裲it}}C2֮}˺kjsw|Fb} U,A/u?Y:XKI]Eqs>rQ ݼd4e51Fpڝaac*\/Qo7x.< wo UNjFfJZK~o3Q)QosF/l#'ZD.ΗβOo[0 .r-*= nzWvT.6xq H80is]џ ZP,\\*G}=ej'QJg%WBۅPp:iن22oL2q3c!ฆn9mtLu `*+?~ H&}-B} /ܪLҼ MԶŜF`wj\mbrrڑl(QlprhWqacp<\-o$^ >`7l7 INP jFi'C:txiGvx==/Z2_ *6a9:\FSOGjym/) endstream endobj 29 0 obj <> stream xZ9cu ^R;PhciydTq%C1/IVόlvbOtx1?3ӟJ'"@﷓Qz^8ͧKWIdQ#}{cOF&[rs钥8/<^^:k~)m/Ghgۿ rA@m"|mVYYӝXpqLqwX疅/6'./#w yLŹYUaԍs\ώjDR}~G`]PBH C4C>J$<6EVraY KٲD|>%M:KL?NlLÅI~fU !LEҽKu#a+HLeGē2\>gHvNvb'28X87=u>2> hyHU{)4gfEZ(oVּG+a캴/}˖^X*]VvFnE% E?;nځ0<=g0טƖgt+&O7 (`W݂T5(I̸Yh|˿ƗO,32o$Ъ7c"U[K0)u'RlGy=m>]$O'o),,fwﲰLjfrGg_F$Ӟw^\\^~rr2d:VPSO:) ?tͦoE0v5l`՗ݚF 7 A 4肊3P Di#4 %nOE[[G"^|d zƇiMko5H>5@3P [3-r!ߞ-67\ ^`QHxZ<1[n^%ֿ~M i$BzZj| U̼죔 wk0BjL|A@ͻFqtI1X*Q8J!VUGA{qB{&ZDk.~ :v QjZQB:=J>[^ O+kaƁ8:V{xے{n<5W|]_焽ޭK;T1XƥHa_Pg@ % *%Syv*1*ឃ#w=(=va=Bq\B>)P*92[|r\[t#Xc|BXzN:7M$31IIDD@@GiT=\EՒQ1&2kZغ m5ΎXN=`ߥ%3RZe*P9M؁ ̄K<S\B%aiYFUTc-C{1 N J&0D9} t=C_ZFAqڏR?S ?XH Ъ4f&<>)ߟ[CÙpdd1[J'},k!>׻kT==G/mTCf]*xF5t /YL/ ‡qa#M 5;9!*I&@&#!6hYIU/xx zqhube)U*> ͤ=nOHAD9Jqv cMK*zIi8SX sשu j=NT ͟-1r5`5CHTJ#sR*apm{m)JYCj)6$iJVٲc}Y_rSnIJ&UKyI<|߮Kb{H4J~}4)#\ m403ůEmLB?&hP'zGcгky9˧SIYyoՖToalmb!{g_B9Kp] nbq+k:Q5ߎޤqԼ ة^D/2lwU^ڹly7 l#z޾95jб1Zza&cSiy_?އ{ [: V{7y{TP6<^**O] B쇊} V)9~W\˔zlC R=Zkq+̉<X9,szd9R,Y}!?Bk%ҕWf[.% ҿܵ&=pP-xٜRwz^j)Dlq ٦ ر~|Sc!v0tk3ג`k=IuJ_EΏ^3\\S} 5s3܊ʥowY]G?ȍ"DjFA%1qko"r}}]]s8k E@u ɱ endstream endobj 33 0 obj <> stream xZ,7+Z ht0m`2lW'ĿoR֪u2{Ri(u ,>ow, 
Eb1,;7jw/}+{_ˏ;/r`)w{x ҆^[қX:i~sIeʴӿ .iIh"Ap<&㇟|Q(G6FyaGKy,ct 'Eˀ1Bt 3y%,2y4:P<%TGP[a]Z?iN .;x5 , ll|^z/LPuvʇ=H=wuN()s»=:sx;2*Se* NXBwyz;H 8o]zkp߬}Qk)ph}ّ=7h4i ~ntcA@(s{7͖VĄ4L)^H {(5`Dd0wNPBcUc mF-ޅzR:7^_ӈ0V8 UE)`g0co f03:F% *} ˣGԸwJWr+BLenCu]#A!|]{ ƞtY4|c BueVݔ]`iAKsn*)փ3"X$j=FK(8)I8sX)PMXLuNbR+P6p:dF7Rm""1 b9 <$r(. ADq[g'ݺ3mi>fųhH "x~^X'M G~*}!B{ZcQۢ Q_]PBR\֣e1S-cbQKHI#"2r+yeO1OI<:pF1aF#4DS~]yQ:j vm-Q_(r ͠&Rm(J\k<̪/bҘ!\6(%A x?k#O-j Iy묗+=RTI"',д%y:[4Γ@jEH']'EZF oa{pt-.PZ,j})9*\'a+ئ* ܽMkf:$-{}5y)|r@&'kq,,p)NѶ+=2qv.fV]H*(=ŮU,=D Ĥ*I:6}#aKƦg!.fzZ41Zj; 6y'LhbxWYf!R%M2NyکP=`O$ܖ)Y;W[ hatMAc0')__ܼ-gz 5d7I]xz)= nrՒ({dzߝ}ԓ5MJWf`Vk my%9?L|x m?PeG jq~Smo9r}/zyXQ.LYx}*X-oxsw)IhW'<\Fv!9L->vO H]-^HV8D|d+[Vh.mr:iL|i`rR5S0PR .K(w 5Іr%?w:A+Xٶw)U1ݶNVҘp/N}F>6o[)?tjj<9Y {}n,6JdvwFpGS!}.)<9CזaVB 2d0K=4 oҁkW2"9}-a9'pgu[EqF ">q@SI"{ I 2J(ӏq9גI*v#fla[m1w ^( kf IX߯CG6ame(y9\$MH᎟J)|}RGGnƗwGtJ4B$t0U:Z߄th2"*XJ`FL+QO%n%JC|$<]$)7F(tx!SqѭъIZ]y>{]M!)44VjZf+Jmtˣ.cX(BOT˜c5v㦨^8P?,ngLdlVM}Tc k^wE ڧd,ƤT^Ȳ_< X! endstream endobj 40 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 41 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 4808>> stream xڵ˲u)8b7LSJRe]$-%%R^#u{.WSSms_zlOe?ҧڦc~|9e^e*L{eL_=T-sir>S+U:{jmTer" m yQ=y!;.^0wOkoh#Λt ׹lS[ܴQ掎6C;YdvT\& u&CsdRNacޤuE:Y  +RL:Yhx͇t^â1mQ4#SKMJG[= j5yNm^mŃ\]:ڼsGMac>sCG2 T!:XO"+V(Nlo|`dlґL覥\#ЦmOw^;Y\cU]C _ bFwk+zm=ˢZ,++,`aA:WE)2]MyTMީN[)R-%W<^(W?dۼW+0Ч߱Iዧr] Y}ZfM_e ^է['UUOv66h]E6L+t*`YW՛7}onY6^Yسuشw]=k1/wݷ q"mU+Zwru+;k[vU n+z+zjoXE` րVdE.-[Uc`j 
Em??._=vDy[бE5C,{2ƿ{ ~߬*X .k-k2z?0[?O|dttHTgӛJ[?-DAj^>x}Ec|ņhɓe7KñU3 M>9STp$zU0$J &*+ y 蹿Э_Lt/._jz}uk Y>Wlʒ]& RLICRπ?d5_BTK"DB0w˷?b J]'aK_1_HBc>>[|c//$'(chO~#F.|h"FxhOk2FCڿ~KN?Ne,%?V0ECi4GOTmz҂/ wժyA6֮h*wUѧ^Wqoʒ,kNuac hj-߉ t${V# V rvf2UO/Ն=e@*+U.bT#9F 3ҦiswzaY!A" @R𑭷kt"MNbhUojٱ:ۦZz>-ו0nHުHo/qy[? gnlx}@J[O''u7]$6U96 oVAUYm;m\іǍg14+N<:K{dRS@P@uS ԭ`t3٩6*~f;9(*QNgSd|JÀI O#x*Ny18AHFǨgS6L$Ӟz2禍3D38Ծ/O@}ji2p(? y8F?ml)'iKO+e?Ic@B lP0 P ,@X( f ,@X8 @m-Bbu@XH @XH p!   ]3‹g/^ @h@&EH7b$@1j@j1&P @M?JI`fg#$95.ݫ&zc9`\ŶU5WtUqMU'r B< {Q0 jq 5|~ aнJ{@ʀ@P/`?>СU*!DxE2R!g%>۷?~WBkHQuIQUk 1GBy"g#ۘFfCۘ)ecۘ dc*J \וS8sz1A!ٯWT"ۚ{[pm-We(ϑj[aDlY\1Bk o2f+Ub2m Ž/R}{yTYbEa/c "Kf8Y˻ؼq\Ή͢(ϑj[?K; $:+x,F3H5/u1dmC%@+J²N}{gٻht[;Uhz*7>mS9F+%v-mςyo芜 _p0L&1ں- VFT⊡hk,[b;m\іǍgy_C,~B=?3ϧsPM\J>'duS<坒G)prNSr$~ƽlSrX$T4;{%LLH*KgL2*p3U$#1O&7y~!T{}!Ti {|!T'LC)\)`3U$g LC"o%drE5%WeړpFI1 3FTw-*`Z>:}aoemQ{lQ䊊ϙ>`K#z}`2#FyaO2FEq󆌳GP$Oc:S G?HjIOaPo~> ejw6we:%'P~L MP1taEa$Iz5LPQE;zO2U"qW *J}4֎&xt 3CebLPQ,϶v. ڬݪ'8kLPQC;`eWzp~ֵ*2Ae+7pmۼ*2AeyTT/̍H܀jrʶjZ+Zv~%{aTH-2AY8 ^ Fx<#S!VdPOAeXOe`ƭOAfhƍOfpmOAgxMOg-OIh-Oh-OIi)^r<%qS<%qS<%qS<%qS<%qS<%qS2<%qSR<%qSr<%qS<%qę-O o-Oo-Oo-Oo-Oo-Oo-Oo-Oo-Oo-Oo&qS<%qS<%qS<%qS<%qS<%qS<%qS<%qS<%qS<%qSo-Oo-OoƽAo-Oo-Oo-Oo-Oo-Oo-Oo9?B@8 !N:oǷUVpFmNhqC -Nw8=DrqӋ$(?g9sstHtttu먎;:Qt;ncN8|J)gk> stream xڝXɎ6+bj 4$19霌CdqZ[z%e*ObD~2 eekM +,?|ߑO/ Ipy/d3 [^n\ 0"C!~?_~[~y9 =xgpgڕݔI}0@Nm7:|+XlMj\A}o}>]SАJi>z~:ڟx>TILNBEE[3W[u7hz`b G)ZA /fݚ5+kpj^$JTs>u&;P12PRcq6Ymh6|N+nr ")+@YIG潃qa"J۽uYjt8!'$-AWr3SK{VIa|V6)X&Hpx5@6 Wr$W{ bHjÔup`ɝH T<&J  RC4PJm`<]";4' 0-jTn+Q#Gxk 'HQBSF-9i#RpƧptR8L(]3ef.3xz:!L$*$(-ʢ-oV[#>c @0I; (J1{@ee Mul].;sz9{1`+kL=FW-GJvӚo/^=hZJ1HMqxVNx_.iJKJTenN]voSXo${$zs_W&b N&E[zUZb|W"?8+"Z<^|> stream x[c D Ru*{=ˁZ'` +[3IW| xpr64#;_{ӷ :9 ۷?U ?٤_a Bi \䩭?KS_]?jz>'$9Zeuh7ŀ$2EYz}jOcm(QQ-4j}e6O=T`B[ rQZ-n7Hب՘Vӄ!7XMF )X-V$.p*8Iq̈yZYf a5=a6B|`XəٲxYR+˂*%:+ts{OuJ^ûWI(lCi?*(}GguugvDV&<%2}Q Gti?~E='Um+[>u}ºE@*"]hP|4Kmf3 䁷?->O Ie^t3sEcoⱱM'KG[w rjO?xjS,KwqhZxN&M+1˞ @I֬yXq{s6+l=;T¼m˯SY?>kK}c5oƍml%o7K鞞*/ݵN{BݻaU dۖO3 
Td{F!&p(+aHWN's]}]Ц(n/ KwW(bkk_7Wiø7h1ÆpHuMi^ntәPgQ5X195GI(9a1J ޵2ɾ+"4;ZpJWPK =TGB:ŵsy‚$7Ѝ3/嶿2|L0.vV~naA#N =8Aw2d㽌5+dҿW-d2D^_1'[!'ⷸeeQg3uNnPP/=+s,"c}ZRK9#!";}w~K"veq+Kxi@~IN iVh !xhxDSറޝv4!R]*?3#&/_W/myT$ư7*{5_|z<~bẾ+lKZuzCAj9y3qY5)Z rz[I3BL*}.$30w;pcn±m# ~[?s\%j+-1V\I@ڈ/?pcZvdk:RyO`圉4x瑪5:uW-v{]^Jxno{e $arv?EꏷO> _s|NY'Pkȝ7]Rzw%P]]UUl TUE׸Z_b)?:r7S>%UX,SH]Uu~ZV]#L'1eeJ=[BbƠ 黝aYvc@1Jj:v̰.U:߆!\0n4O`qf˺10,ձזw _*Gd,K>_C:b9/+}U=&9Q++ϏV !nΔa`7{X#h͖+VJUWnT,w~*$)8{l؊8YɘJ/!g+؃CoGLJ:/jϜTn24F=B3V%gl0HƱ?6b}\zYgl4I#+ibEA'W5k̻Īȏz8ym 1sjZC+i$f rY< X'ėCjHMܾMRYVVN!8J}3F+M=T:pI8{nsB؍EHlrwi )8[CNxq![ Og 1Qs[ϟ4܅!o1|lSf>k+OYC~&;h݁D;ve1xc$$I~[h-21_q _Vv*/IК KWw6,3ܑTWbfJG $ .$U+KGg8S#sлn~9f?J8'&JIdVXñ'uZ|>Ooybn"L)+*<~p; E'(OV'tk[.!dB|\i2 H/q7H&ד}OT2T|!Y͸$lu$U8~qVR؎hwk%mq:-jB3Z/oZql-p8Vn9qi{C))֮ClYpŽvゃJ A\ OGwIUNwytq)a<ƺGܝ_BSfژۙZ6ҭլyR}N]1IjX&2b]Cפ8hq׈臜R=y\o}_?qt endstream endobj 60 0 obj <> stream xڭ[K$W(LT%{%֞Cw={;"Iw4[|v,?\_߾-,Uoe]Ua9Ay˷ZêA0Y y]w6N[mIz2y4hyu4S赍z ퟴD ݡF]9%탮=GTxb#!h8V%'+Z;Ncg[Ƹ(gc]_".xI7=:fb.4ʟȳRup{f*!DLL.`m$6H\Hkc@~7qem\'ehXWDf?<4q&x`cq;}H)꒸FYMJoe_ #1xVkl&.UoU1- ( U-SLL <(4l)ٌȮjKtզ\z61NL!a40ɲ*ޓHkf0^ U+YwN ȕQխ'ЬuԞ4YIU~H|$H0LAVfٶCHSEwcx4"qbkp1Ag'Jo9F}WMY)R=uժ/h (3{TZ;~Ck]FBKjך J[CCK7yx86)K-bgB4B;k`d wEvZUHlrMFH!U+x|nLCL*  %[*d[]DJ{t)iZoIӭklrp>NiAULQY)c@Iֱ">`I8O[kjH) McT`}g9 H-b$Hqs V>J\ ҝw oɉ>¯W8Tv>>Ҫ?40s6NŨ=s3A\ Xþ<7#[׽a`c SZO1^;XQ{_O.nd5MFg&6[b5@`4SP n[(gnRs*zӌDtR>Gŝj; *޳$hڔʖ`s:v3Se- `e`eF (X)y1s久ꥨ]_>$gpy\G}n;C3?[٧D!T&6rk '50!2+ :|AKiOAޥ8Vo81^0l7C; cmM:bֆ֖yI{]YzƅrIenyV*Y;údYʹ]*K}"=,dUƎ BBY-ᣭ]Weg)ػ!|! 
t󽴽܎1z̿geVTSYaR+K;"PP퓁.jK(dµ0etă  , y;d#ﶵWk/^1(Zs U\ӷ WKܥkOu@"NlfnW)Bk .AR-QtEW ;KNEG6f2* 9|f=Ğ(RWbH~60 jЁIjS&M$qSom۶HMJ |2$H*Td]|EwF ξ:}r% >za؋;r {R_uTgќtnȌi(=т)wjrhG9Ydpאpca_e#qٖO`H!=N+Fzr7S*ݺP7TAXL''iiτ|ySb)4)^+8խg!Cת>E(dcҗWXBYa>}->yEe714TLxBvVW(g; ɌVrtg߱^&h2΋@7$u Q cB@Jz;s׀Fm(&\X&g"Z<(Wr'w$l6csXbn'50KHhO+w|~BH>7u<_b2 endstream endobj 65 0 obj <> stream xZ͎6 )QG ܊Ia/)Q13NS1П(Ҥ?:,_h|%ZgO;bU?*fXNF;\X>wI*XX~]>VԮRZ~j̮AP!#-z]nm|S^J&Vr-ϗZ[}:!]qgHrC#ںBu],~Ikϋ*D)|cMO',qg=1nqcdlb[.jx-brT*'[ nqƛ6AgqfEULVm':ZEGn2ɲ3ڡMC+~0\%hihzLϼ)h <(wMulm ]quKe7k%$.O}_(i>2OFوbzpURؼMF,_۵qq[[z3bXA/RBajUB.Wsv%zoUt Oy n⺣ DlQ&DlPB9VqMb|/ߋ?t4k78{_AElIĦ#qheqUĮ"vsA>чY!:u1A@dbA>1 FE"L$El]E&bCt! b~ ?b ?mFQ]N"6,b]E*bӞ qI/D i4bD1 >hXDA>(bM"6,b7\p*j$Hb$1 IL$&A:$Rs%$Hb$1 IL$&A:$\I"m$1 Î,fA_o.MzooK]3HGZ7loDn+6 sEi W;QSoZ^N^h= Rs_/#1F@!ۿ|brPڇ)XB_]m:t7I],nlq*0QB mA90ͩ;󏦹wg<87CE~8gЭ)4I u-jwP{C%βFG2vmBAY7ʺCw|^p[(i*F-zaFw.S)^++;cf+m;YJBz>3_H5" -jxrSIf t+݊{[N,&W{?çMq~vH0K;+` |=4EC7a7Rd7ȴmA"h*vLU>u&l泊˻g؛$ :H-1]~/ endstream endobj 73 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 74 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 2841>> stream xڥˎ\,f_$B'%Gď&A?N{@eH ] K @-C?Aݬ|MV⪫11P+/sSIaj_.al\=ݐwo~iiyE%y$rkY Yݓgڞ"Q8 y'=dϛb N=??uNmwm4=_:6MjZW__ *)qkڤ.!/Pzx3><1AfPk3n0%7cKJ1),!J7VeXGتfP),+V 7iUz37 5j:-qJ uggim@%a^%@Ɛ!*PT8vE@"ruuvD@8Y> UCN.'F^CFVG"bӈ\ piM g@#kYjDʙ߼lڴFf ˥huA#Y.ЭMF`[Id~Qj"`SFΑUQ^Un6&4X+ݻF Ј*r- 4"Օ+hDԢofRP+5R'A#k t Be 6XP[ƦrQȜQ;FD-zEOH[QZ~J vh4,oyЈ5V~ QkP9:,K=zhvE5MZS֦UHX|p5KH XTrdaBFVeÑ{4"D^ 
hDԢX'rfIĝX/%jDԢ;ߗ=Iea,?<hg:> uxѦ`$wf endstream endobj 75 0 obj <> stream xڭVM0 +|JJ8v/ٴhh]%W ?N?~{|W!J@"81  F"1)?ݿS`lHPhyY/.[Ba61K7C{\n+BA=% v}wzƨ4O\/-^X]2$9p$T@I0'r;qev3FKvnHsZ'zj8s,rUy=QWBd)_)fq Xy+eC!+7#$ )yI-e=yDή^2@ɨafdIҍwe,CPF81%( ZU|* Mm x͝ "pt,Jg"#lv`r| 254fzbٝ ա]$f=3!`ߍ[SˮOW9(~S|Te!VsI2j69wlKi؛DÉM$=i}o'o_!UOղXܖGmfm7FBo~C% tu4ax"}Zmp &GdlQژ2WM.In&LAe 21g<&InWIkׯ|)5?ۧ9rj@w+ endstream endobj 78 0 obj <> stream xڝY͎6 )qO P]mV L=^ebiHd#OEB %@_}#vQ;pvN#Q*J_B||eΡ 6/_ǻ Uy?E\ JބWV)^xgˮx o~P(!hqQ~q&d I G),^(Q /@(YȘ? L"Y5 x᳠PS)+jXZ vR;iO4ڕӝvlbXxGi\>s1 ,.ٹdn cFY6**pڵZ84b1Ȗ퐶[BVG8D?g"GSdC&F]؛ʷ_%mepi$qs C_!'T\rGXK"ml`i ?G%TT!d E5> ~kh(Z>0is*?DC*]z_5R4;\ ]ԛ* ڞ,sQ]ԛ(և^Ly;Sԇ.iX[{*P`qhM´B2 a@QyzhJ J}C*DsOLP]ԻpHi pJd{hZG3ͨ> stream xڥZI+WN@!@l 7's rmKd4[q0O.o}Ml^?~ëo_ 2ܼ^Fm'ApyJzWi'~Om/'{¹P@=9{7cAa`Up(|Anv_M˾K%ʴ,#?LMᝂZ,t[iDū3N`EIY+*&20C` )Yu.U31 a%0vڑnqBF?.EF%~2bH),VgnY"ffkTF)6tnA 3_ZArzĹTm`HyXqˆb Q^ eSCJTzD^.>w%lɬb@=&T8m;S [ «7})ߩ\$\~1ub`nVޝȽ{q:jOJ:aG,wWCl'042RS= w]nPSc3V;4ʻjw W3hlCzB{OC7694 ڀUH \7IC؍v)ZжMu_ թJOt-O* if_;T:;6=݄*$, _["a tQy,+cr˖61vhP ؘIk }ILV&C]KcZ!8dky^=oydq6LCǽBGo4dߌv2-36eӝ~uq(EIi'N(N]i($vlMA񙤪2< N1zΦҽ\ L羓ڝy'] W4:E5_T5"g]=AUl{1[\fu~|6f67j:LL֮55?`j}z rwny~[JTWWpaAeiEQ %Vi3A*:rկIj 6p2s2l>,CD3/g2 oad)#ڛlaồ0ǘe[$v:J6JwOnevuIM;;5/QC]qR8v ̈́[5h`T3q~kt)uFtiaT@Ic<\~.ȗx{{'v ]ԙsYܽyzou V' ż\t7co䔋(o* zWA7?_֡+? 
endstream endobj 90 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 91 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 135278>> stream xԽM.=s7Jo7ɡ$#8H+#Z,c'X}Ωfjnï?~?/~ w\T?C?5qhDn@LO~܁~0@TBD^/qĿ{|+/ѵt~k_r l=?ܞcLs4VH)y=p._=􎳨TOo2xNv88"7 gz!V`a?AYgoiPr5|O~jŏϿc?|<4..4?GNtJϨ0R_Vi4 v)?)J2O_W?_~_'?ݟ_O߱;N⼵?~پoomѿ8~o7?X9)a_:齴ߓǘ?կY>&53<Rjy<Jx{x%i9@jO{}%(xURr{Ux[:۩(9,go~O(9LͺߖDa1$BN'm@$qO x `9#ĺ>{H]d !J)r IdHⴟ޵$q1z4&O6 a^بsv6z[2* ޹M\Tf/㤤㢁A-!ӼAޫ X}n~K@`uT{O jSɉ  D5J VOdsi bdcNLjxk$Wh ˊ/P^w*Nz:uu+h bX{>Ċ'= +P0\! ZXsm?@`ٔ{;";Yn+U?lEo |{; ޡZ{ Dƾ15"V÷C9|[@`{;:{>4eh OK|{Zua G@e36@`u9m/4ޮX=q^noKֺM;4Xkds)XT} #>;Cl\̉h a[#@1FiJh b]qM{ 羿=<'G ֚#N{4XMj| @*Rp~+֥ DƼ'@==ZZIXm=<뽹߀{ *= DRᗤ;4dޮQ35RX],X?ӪœwD @E249Ԃ%{XlOX%N{4Ҽf{ ВU:fz Z(7ŷ%@Qk{4֭<{ ~f@ yOV\Q|GXт%%@a!34/Z3$h j}'TS <G=\֪m=R1kЂ%#R=A46P1k6ٞC54ZܷA Ĭ9Z}U4:5}cԂl*uO6=uO8R٬u[))[p f륇uO{&=@vk"ud4 ?9uO8㊬!5 [iM${D٭y1aM${Dvd&垰#f؟#>Ht 5*٭t'n;=AÚH=ݚH=ݚ=z=z"5{!5{!_MC=r23r]]L>23r23rs}djg23r#5ݑܮHenHenw2;Rs3Q۝+QGC;QWC;QWC;QGCD͇nwCD͇n_D͇nD͇nD͇nݾ5݉>5}%j>t5}$j>tJ|v'j>tH|N|J|N|#Q۝+QGC;QWCD͇nD͇n_>5}$j>tH|N|#QGCD͇n>5}&j>t5}&j>t5}&j>t5}&j>tH|'j>tN|#QGCD͇nWCD͇n>5}$j>tJ|v'j>tJ|v'j>tJ|N|#QGCD͇nD͇n_>5f!}$j>tD#tG趇lgG'jo&jO>B7}ng݉G%jە}n>Q|!fD#tGJ>B7}n>Q\]GL>Byc}^L!Q^<~y=k- ȇB GV#Jx4ڙq}p1bOϚ%9aGXe4Fuќ$M>"'lAFd9aE5sı E9@% AL--S &AKJęSBIb&{80I>{ĉ EqbĜYaiZU>U1+}ĉ)!f81u0+}ĉӶP'SP'~} Eq̹-+}ĉY [V(_ضP'`H[V(s{V(zg#d{CE'8q>ݺe{8÷BzIc Eqlm Eqb 0+}ĉ҃-+}ĉ!j9Y#N1kزBGس2>uf8m~"+ljI; 8q. 
B=Nɚd˒BqZ{f8$@V( Eqb*6>YaR>X7f jEL Eqb͛Ya۷ZU>b3+}ĉ|h}ìP'f|0"#7BG8?d-+}ĉo' a Eq94<[V(_kݏ>RK1+}ĉ3{VX-դjVشnV=¥87Ya$cV-£FY5Jc >āY<,^^` Eqke8$ EV(g&ַP'VcVXmIZA Eq >Ċ/v5+~7+YYx1+2{Vm80wݛ` 9UϬ8BlV gV'tf-]qb+gV'6;N˲B̒4˲B 6e%PӬ:BE6+Y^ϬP:~ljXVf3+ά;N.+wX]VX8qbuYazlje 'V;N.+wX]VX8FVYe '6;Nl.+lw\V8qbsYavlje '.d^bOmPJmPJmPǥ6gڜKmP#9CڜΕڜOmP#qڸTm\s6.չB|f6>ֹ3|e6>ֹ3|e6>ֹ3|e6>ֹ3|d6>l|sg6>l|X#Wfc+Wfc:_u:u>2|e6>ֹ3|d6>l|sg6>l|Xl|Xl|X#ΝX+Gfc;Wfc:wfc:u>2|d6>l|sg6>l|X#gfc+Gfc:u:u:u>2|d6>񙍏u3|d6>l|Xl|X#Gfc:_u:_u:_u>2ܙu2|d6>l|Xl|X#oe6㜱Gfcl>b|:Gs:|:>uu~3u|f#ֹ2Xg6l>b|:Wf#GUod6l>b|:gfܙG3Xe6Ε|:>u\fs:(S\woU"EQQ)">m}/ΉlcATֻ#[ԹHq=kTkEQEXTmOHQ>$cb-yR\{_Kc9=%%yU,"Gu r1)kS.J'{B5Pi$\^a Dԃ) [V4O|=kI |}S! 'G>9'ç`sҊ0|}S l>E[BG DTH>>՝Ve%&*>(dCŚd #>(@)[SὓE+O͜G~$| zϧέk ȧ QLO|?u D{;ZlO|z}#z#m>*EK3f\mVG%s{>(59c[(XX( n V t.O֘ zϧmOٟO{>bO{>5?m!O=Xp59jYIOKIOH#z{9D8IOń 'G>%>>}#zUc$ȧW1"|JBGZU>1|}S wO$fay DԜ-|}SYJ*F$ȧro:>>O~ D#F=kV@7ZKZ0ڪWZXm%ZK}0j`d00b[L":]Xuf`C#V>j2c$ȧ̾O|jq D +>>גŅD{}ƱO|d)'G>Z |}S\>=znV{hYoOdçǖ♵VݷN$ISjg;ZMu TwSW>Uz/Q\w-|~>gYZf3|t>ipY 48ݬ~O6k.DžOϝO>;.|w>]|*)TvS§|SمOΧ ʝO>;*.|*w>U\TI~UK[X%խ?{buD^Xz/Qna(VK[X%խ?Dqwq^DaMwD~Gnȼ&; Evk"ŧ5w:;<\U9$BݚH(٭?*9hl hyhyhy 4ϼ#<+Lx~ę>3}yǙ>3}yǙ>3}yǙ>3}g3}yǙ>3}yǙ>3}g3}g+g#W;W;W;W+gϏ8'wϯ8'wϯ8'qO3}yǙ>3}g#GϏ8'wϯ8'qO3}yř>3}g3}g3}g#g3GOg;GϏ8'qO3}yǙ>3}g3}g3}g#G;WϏ8'qOLx~ř>3}qG'gg7̏3?=<ߊ3?+H<}x q7̏Ǚ3?Og~$Wx8#qG̏3?Og~${9pqGyř3?Og~$Wx8#qG̏3?Og~$gxe˻I &Ia{?JUJڟ#Uco^(QiJhr(p@bn{īFŨrxP0.qPx0W`*,J1p9הXO 6 )at9G?9mX'̡%ըrxP0UX7yQrZ) E0 qOhrT(#"oap SJ329eVc}2XLKVc6m-CbUC0"_eo:``xXᑠxQVxX[0VayvjT9,MqaƐÙXedC[(8\]@:Vp8Aa1PXPzX!ߪ/X|_~0NT< LbD~ ?1 a`#9Ipa1 Gb JNjcDa5NXeWp9A0&Ka1Հ5G(=,*LV+U8\24Kp P< pGGÈ7@Aݝ*wv^%orV(1((=, pCv2-+L0@ac@a1I]@4u4-jd9xF1B[̔D:"(8(,w(#@0JQqx@a1n(S0 B:rPӨ/'S<RTN p* ߠ'3K&H.Ksl3sX\u+<.vg0E ҀEMھtrbu?u]JVp\!S 4HaT@2̏L8@4@VHzlFz mw!si8@ RT@$H 4 qyb DH)jTN? ਫ਼f{á9<խC*e89@ COji"J͇@4@DB N9r|G\K|@4`9e i8,36HNR8<թB!E*'@ :GH"B@2s4@~z,{TWJ24`9HTi8ڀ4`9eR8,'HqxTg:wl!YtTjC`Ule$j Ҩ  j-6V"P+ -k !Am,jNZ|#,=mXE@x# }s6$q91Jm,!+tjC`qCʢK6.Xҩ qjVک URoEA"8=PEBmXEAm,VqH^uSҹ>Ƞ6:47Ԇ@BVC_H! 
I\R" !XtԆb1P+BԆT.Ҩ @-:h^gVf)Q*!89P+BELmXEAm,@ ԊBm,^uQ"VeѥSP"8R"VZZ,gjEhި /FXtԆbV`jC ūnjC`utD#HK۫AJ@C-$jC` "v}!H6V#ҩ  nu0hEh蘩 UTjC*72 +qRVNڐ*Ys-ș2 WjC`ayP2_ VDjC`+X,tjCĪ,>Ԋ7ڐ?'kŎY!F8VejC27R Ţ[6nX!BujC`螩R1s&[YhUa랐^+ F8붑^G0"!biڐLTjC`qF6VPAZ5NG%E4*Dmq!uBR;_z]h"mάt#Z6dpABVj mH0@:!Xt,Ԇ7Xة o2![ќȸjEhܩ Zee]_HrH=_'&X,zݟ hڐ njCĊ,zݟO%hXVL^VbѽR+B;![2"8sA>z,u#SEBm,$1ۢH$yuf% R+BԆ@Vj/,]_H& ŢS6DH{n$ɁZZH6$r! :ΝXP+BP"!Cƅx"#.ڐčdjClTHzY!FFmHH@!b\P^g 8Dڍ ǔיURn;_HҢ3!8PneufVYtԆFm,@Ԇ?Ԋb=REBm,@Ԇhu+X!Bי^uS;_H[mZ{K+ԆD !P^%*/[3+Ԇjiuk!XJ*D !:FmHKtjC`u [3EDmHd@2!bE@Ԋb%QALmX9 jC`UaQ^V'Íp=nάv!xtzY,jC`RFmHoZZ H6DƫnYbѭQ;_nEBm,!b xoW(ԆguhGŢWՎ^Gnu8+jG^W$j[3+;$H% 5djC2w<uf"X!B$Lmuf% !P+BEHm,r:2 jCw,$jEhUXtJԆ7RRnԆUÉp#,: jC`qC-@NԆ$REZZ sJ6VrH]#"8P"'2daѵRReEnaۖob:Xmo֨ n~:ZzVy^_jiި URo>u[.j> oE@-Ԇ|067 nnԆD:!bB2Am$<ԊB萩 URo !iԆbVڐ=7 jClwdlάdݷ7;_H`f%Am,J67+F8S!b%^u림mȿY,jC`qaN7P*!be@iԆbeP9jE`@ԆbѵREFmX9vjC`񪫃ڐ QYi`i[ihqZ6`!`ikڐWZZ /D{]Ԇ7X!Dcns:Z,z޷s:Z,z> hC`%aSzx^GEO9z,Y׶@7҈DjCB$)D*!:FmX9H7EwjC:=z#ȜCVzJ2 Ţ"l[o 4jCĪωF8SELJZZ!B 6jC`qb6NZXENm, 6 jEhˉΙڐ=TjC`qr6ppJghC`̙hE` EWRbqҰ`X bי@ԆD $Q":цdn dP2U V ܮH6$qe ڐ jC^7^G8?Ԋ^#!XtԆb}P2 ^7""xՍLmH.'.D{ݺAmfYbV'R H6VrH $DjEh "ڐ&;?G"XȺ?E[2YH6D,X!FXtԆϢ9R+;$HsH@ԊJUbѥP"V'ÍpC-@ ԆRXtԆDn}"FCڅT^u5Q+9$6jC`u i8l!,-zPEH-@+Ԇx#,5jC`u ֩ hX!FXtԊ,+Y3}!uKG_F6D,u#xHGŢ׍0`jEhqt:XF6p7¢G6`~_-ڐ jC`|^[`BJmJQk8d\̮ m iuf5"*ڐʍ4jC`"XP+虦uo3!ۄԆ^ aѱQҸ8NmXjEh%aѩPEFmH%fqL$oάv!EO۫6+:$`>".X)}{YE+6+;$`~ ZZ +5C^}Y,jC 75?v!L_HrH-zPED,u/P3!XtmԆ=ԊYQޖ̚hXf`ikX֩ ܍D ^#"Xt/ԆJmH@!Cҍ> R׭YQ9z-^ukR^GEGB9z,uk>S^G+:$` jCwde{qYNԆ$~"FC "B NmH)O^gʂ6pcp X,: jC`qbVV 'Rd"X,:&jCw$=Ԋ URnEDmXEBmH@:!C ,:?ԊDmH@2!bDs#,:jC`ܨ iDC΅Ld{f5^Y,z%Lۋ60mH>X ŢCXsjKf7P3!8P^GE7Պ^gB8kPm{FmH܂ZZ,jC`qz6$r{'jC`ea=S !biXZt6spACX ūnTjC*7ҩ sڒ͒X/7bVsH ӌ n8H' x R ܭH6srAْD!bje{f lq,*!bE@|nE@mH@2!bp")HvH^uS"V~N$?7EjC`qC-@JԆU‰p#,:ejC27=R XyLzZW]NԊbѹRPjE`5@ԆJI7¢K6.z8n$;$߈] )@^GP*!8QW]Ԇty!uX`}Տ^Gk8d\5qGŢ[VEjC^2!xխU?zz#5d,u~:ZŢ{6p7s ڐO$`Myhީ K{]ԆYQ%URoW݈ԆnTjC`NmXƠ6eV8b*@mN$R"z%X!FLmAQ"zݮ/8܈ݩ #zؑenfhcuf ,zжJlVvH*nlN-]_^SEwjC`qbVVDmH'oE'|Ll 
VjC*^?o8NUX[m/,@Ԇ7¢Wv¬O@m,@Ԇ՞iύܨ Xߐ [2Y!Bו@|#,$jCwg K{]Ԇb5Q+B;D0݈6VrH^u9pȸuQ%jEhqZ6D,uSEωS|^G0C(цDmH:Z,zDjEhUDmXL_X!B `tjC`qƠ6VuH)z=( D-Y2;u6V&R ՈtjC:wg?uf% !R+BԆD*!bp"=URo"֖І$nP99$׭à OŢgWRlNڐ=$ hC`5 ,zJͤD+B!C$=+)6+;$ jC`qr6rwAfWRl.X ܝH6Ҩ Ţ?n,]"י@MԆjipj6p_)7«VjC^W;!8uP:EjE`ikX!FCC06pI7¢ghq֔'ڒYLmHV@!CڅhNGߐ6$p+m ڐ}TjC^7!ddE@̪D!JmXukhhEhe"X!BІbѡRE nج~!uhC7$&jEh5LmH"ڐƭtjCDz@Ԇ -HJԊbѩP"`ѩSҹZeSjCw>t#5Hdv8k^Vh"n,=sbXt ԊH%fED8.ڐtjC`qʠ6VuH]3"י FmH@:!rH@ԊBmH@*!JiԆbVpȸu/E@mH'/sz6VrH^u=Sk8d\Bm,@ԆZeeu/EL>zr#^gBX8YQ%% {zݮ/IԆ^'{6qhABVV% 52.D̦"slKf7+;$H#Ҩ o8 FjC"7X>n$;$ &jC` iZt6= ܱXP+sR6$qbAC-!XtNԆ58P–:X\WR+)R)JJ%(X4c{%Y!H7OVVuH@ Ԇn>h蚨 XbKf^–r:ZZt6n"ChkXZ6VsHu-Q+:$jC`iѕ׭Z9z-@ԊLm,^uP":¢{6r[ Nm,=jEhU@mHய@"!xՍDmv#!!z\.X!FNmX֗huḓ̪72$jC7@=ufe"XȠ6D,:16n,rURoiԆ^ aѡSҹ VpȸX3:Cʍ蘨 rb6VrH@Ԇtny"BNZZ Hs"=܎+)6 !bH.5jC`iуX!F89P+^WǶ:X-n,bfDs#!FCꍰÉp#ji jC`%))7ZXj6$rE ڐ-Opȸukێ^GEBm,-RR콎VrH-Sҹ i:ڐ}iZZZ6rǿ{-&uCʍpZ6D,u/E@z# ɲ5GŢ,`MdV-ukBُ^G~:Z!FX-BME1+ԆjD:!b5mجdd֬B$RIԆ$w 9X!FP+ ܈Dʍpb6p> ڐƝtjC`UZX!8RR܉ ωF8QҸۉa9Q+k\ ܞDҍpr6sK4AJVdY|%š/8HsHX)zY!FCt ^Gūn=5^GKԆnhqV"9^GE@mH\:Z5ےY85QQ։ ^gB8kn5%fqj6D,uSҹ VnX K{]Ԇ7¢[6ujC:w{GjEh'jC`qz6r& ڐƽ jC$Ģg]E0;цGS+9$` jC`)*4v}!H6VsHAQҸЁX3Ԇ n#HԊ’]_H" UjC`Ш i1H6s; XUύ H6`mXW]ԆbqP2 )P+B"!IԆ$ns"FXtjԆ4 S0z#,:jCQِ-50 .$nP"Ch+ڐ]GdjC`qʠ6dpKAK"י]!;}Ԇp'b-S+BEiF GU‰p# 6q_ יx#6 /D{]o:gjC^7s^GEFmH@!;7h%aVppH^&quC4 ^7ù^Gq[2k`.LmHb} ڐ:Z5ےYC jCB2^IYH6$s A2[3+9$HuHNRR@יH-!bDs# ڐʅ'/iF8V jC` "ڐU@ !8R9]xNd<7EjC_ jEhqR6$ReיŢi{%fMs^IY6rM4Fmr#Zt6sp#5EN[3:pȸu/$;$ 'jC^gBBmH)7ȕNmHz[ ڐŴ'nd8d\:¢KV.>Ԋ@m,^uk.^GEDmHZ{-@Ԇd.R )\ RR Dҍp6jEAS>z-=3֔^GE u8k^K{]ԆD.R )\ R"Vxޖ̚n:ވݩ +qt=oKf@v!L_HrHHԆnuCTY?Ђ%.i{%f"ڐn@!bI;$"ڐn'RoDԆT.0Ҩ i\S9]xNd<70 "8!Pd^ WRlVvH\Pl{zY!H7EWjC*/iԆύ$7EwjCJ:X!FX¬̢c6 fjC2)ԆR \QҸȠ6VuH@ Ԋ^!^jC`5 ^!HɁZZ!FCh3}!r6$rƉ ^!+*N$ gjC2W1)Ԇy_ԆT..Ҩ Vpȸu/EG~S=z- R |H6VuH|D! 
>ԣUW#!OHRmɬYj6jC 92 K{ @-@KԆ$>}}"B׵LmH@ !8mP":¢{Vڐv$jC |&H6X!BNm,^u#P+BEHmH\9+*odvdEURod8d\z]4}{%fe"ڐʇ\4jCrҩ EjC`$DjEhu A7jŢC+)6EJm,@hԆx#,:&jEhu ) |:H68 |hDƅT -,@ Ԇ>w"BNڐćN$ ejC2;~!=H7EwjC:;z#P+k˕ڐGN΍ڐ&lqV$ܷ^gVrH_uCƅz4ےI\?z-^u~:XV܏^GENmr#!BՇZZ,jC`qےFmXL_n"8-P" |HP+B:pȸu=P#!XtOԆgjC`EBmH3@*!bi3}!!Ft:!ϑԆn<Ԋ*)7ڐ _sCuxխ:h8z~!քr $mجF$P>HmH@!_Ԇ^eZbFmH'RodԆYtHԊbѡR9]HyN<7¢C6VuH~!5H 7«.>Ԋji722.qb6 6jC`u ,:=ԊDmH@ !߸Ԇ5XtԆ ~t"F89P+2WRM-!_)Ԇ~RR= FmH7(H-=gh9z-=ùjC | ڐtDʍ9`ijC`qj6ڐ̈H622 ui"יt# I$jCsR ڐDhѝڐlDƅhZZ,gjC`u ^ 4jC`UaѽSԆZZ!!zr8z-=uCʍp K{ Ѣ !Jmq!L_ݩ /D6@JI7RRodԆ%Kf}!!FFmHc!HxŢC6$2 IN_H 'RÍP X!B4y{%f蘨 ڐ̏@!IZZ,:jC ?ԆT~Ҩ 8цb9P+,";eJ/E0mH'' .ZZ,djC2?)ԆYm,]:!"N]5GūjC?!IԆ$NOd\ o9]Z )ԆtiZZZ6q| {"Cpz6$p $R8jC^;!CҍTZXF6$p$QEFmH{#u8k&^G0c^GKf啙djC2@ !CƅɛiJ7҈4jCB[CKԆ 5O`Mm`V'Í Ţ׬h{%f )DJB$SyҨ iH 7"CꍰX X ڐΦ}"B:8 njEhqR6$7¢S6$IԆ$6' ڐ#ȚיURn;_Hb%X)`>zz#!B׭,1`MKhqb#ц ^&os>zt# (׭)r>z-] !gDڍ ^W*!? Q;_H}N>70 ūnh蚨 iFCpj6D,uQEZZ!F8-P" o*!8S2bnMh5.GŢ75.Gn:"`Ŗn`jEh׭D`M-,Ԇ(2^Iaz`(oجL$Q)ԆD!bnجr# Ț[2kz]iizY!FXtԆb!Q"Vl,@hԆҢ;!bu"CU !bp"# VjC`UFmȴJ;!Am,V[2kV9gzY!FCp̵ڒY" 5jC`eѢ!bDs#!FCʍ֣^gB8k.^mfUUfuNm,!Cڅh+ZZ!FCpJ6D,u/WݚhףbѥPP*!bi+X!FXt}jC`qj6fjC`ڨ V^"v:ZAۖ̚X3v:XL_^3v:Z,z>hqVݎ^K{ݚbѭQ9ujC^gBx@-!be@ԆJI7X,WjC^!C4 ^gBXԊ*)7X3v:XL_HvH=!8#S"Q URnDԆ5ύtjC`XRt]O:Vӷ%fe"XH6D,*cWRlVjEh"X!B"^.WRl^9JͪD*!C_%N W_T(+avR/ХZAYIi,4C,tC#(#+ ]YA*J J W]Ȯ ] YP;Y!PYA7//ڏ Jrk,PJ h,*_:P J C?Mߟ3]܋OׁXs_ R q=7u딓GvEbArwd)zPzVXȮ{~?]{@#(#++(+)uIasAISN ,PJϊR{@,`-C#(#+\uGvŐw.d)zWC3(3)u{bR Vd)ZPZVfPfVvPvR)B"K1P YAut`YR Nbh =R 5Ebhl/Ebhed\,NbAbEbhe'e#+Y!.,ХbbebZȮZ嫬PYUW'Y!,PJ ,F|Ʋ+_x'K1x>]ZAYI)R~YaObק@> K9ȻK>]jAiYlEvнAYa]'Ypџ,Nw(dW@=(=+ =*YAFbAb$K1cYɮf#K9ȻN9)5(5+,`vC,`u^>]b|ߠ, T~:PJ Lw}t8YzAI,R ̠̤x)'d he%ŻnC-(-+,`WC;(;)u{'YHU>]ۛ,zBurd]I)(,PJBid)7|>IBeAYl.!')^gONrP}J J C_, JJe&K1U^xХbb$ ++,R pՕMr`z]R ̤͠LPY, Jw]]d)9Oׁ]d)vPvR딓҃ҳu٭}SWtO K1CO.<bϳ:Хbm^be{KOR8WWVX@]`$еbR :R꿯YY!zZAYIWWVZ!bVRk,P J h, Jw]]ՠԬ~, R 1td)X@_d)7:Л,C?ǘ: JwrR_u ~1u t<':=R,P J͊/`b>?]gȻnC-(-+3(3)ubR 9t!ﺹR 0]x)'n]d)z X, X, 
X,Nwݚd)Xd)fPfP9]OׁjPjVzPzVXsX3]IB,]zeXhe넼딓o@,ynʬwrRX@/dW@#(#+ +YAujAiYAI,P J , JwrR|EbR ?]gȻnC =:Y:ԠԬ0d)XXd)y׍Mb̋ he~SݧtdGb}ΐwrRjPjVX}F9Y!t!2~1}?~̤͠x)'': X, ﺵR qݷ:C, ؕ, R R 1d)yIbEb,΁eS6J#K9Ⱥ΁e)ZPZV& K9Ⱥz!')^fBbhTo.XbNbIb\,F]|Cݔ\Ib.z>I!t1}=^x߻ORUJJ J g,Uw /C/CMbԋ슡v}vek!K142j'K9a'K1ubv]14~_eZ!K1U*Y!n, :夰R ̤͠x)'fIbRSNR ɮ, LwrR|Ibgee%Ż>:P Jˊ/`bGvŐwݸR 1d)Xhd)y)'Nb K1221R ̤͠x)'R 6Y!#bȻn^d)ZPZVFPFVvPvP9OׁzPzVXd)y=:Ws`Г, :6Y!^!:夔R qխIrwsZ }AIۅ,C?g: Lwd)|IbhedEruɦ5Bbh4W/TRQYR }Ȯ*EbhedeION xM̾ L*/+>t#K142~ T|,Ő=R ̬͠T_&K1ԃҳ^j ] YPY!.,ejAiYAI\ue7YpdW y)'eEbhe'ŻC ]+Y!zYd)X@d)FPFVvPvP~ nOׁX: Lw7:P J xL o u?MOׁ|Abhef<蟮3]/+,*헕R 1td)u0 , , ﺱR 1]YR x)'g%K1f#K13Jt!ﺹR Л,C!UR ՠԬ0jd)7l/04 'K142]𜺆^}!Ƨ@,9uOSN C?G: ؝, ﺽR szeVȺxuPEd)FPFV6J'K9Ⱥ|d)$K1C/頻_V)ah(,Bid)MY!p-C,๣AIY ].+ ., (,*.,CAbheeuztJ JɊ/`b^I Ͻ/ K1{:!I ZJ-YaIb]~nn@#(#++(+)umk,ŐR x Y!, ^R ՠԬ9t{'K9ȻOC-(-+,/딓RRGvuIa tthe'Ż9OׁIu zTX]t>]bAI񮛃, , , #AYa"K1G+utW'K142"K9Ȼt w#K9hR =R ϗBYb4Cd]IAYAYY(,F|ُ')^&OR@R Αu:x}d)VPVR䀹_VX5R Ћ,ŐR,\dW@-(-+3(3)U/+ ] Y!P*Y~}~eŇnd)X@d)X@d)UFJJϊ/`4Y@ȮX@d)FPFV$K1ԂҲ22]  h,W]kd)hua>]b: :Cu`>]b\0d)y)'7C#(#+,>:, >R /:C!QR R 0d)y׍AbfMbhe'ŻnȮzPzVX,d)y)'fg%K1]7Y0;YAusbGv"KѥgupQNbhe'Ż>u3OׁjPjVFPFV|, :Ћ,ŐR xBvԃҳл]Y!ޝ, vtIbEruHZ6E\d):J'K1P&YAuR MS h9)=(=+ }u^}ԠԬ Eb|}Bb3_x!pcNbNrPa3_x!(IbebsTEv$ ZR Rj,C=(=+ ]YABvZ%K1ЭbRWWVZPZV K9Ȼ-C5(5+,9 OׁX@/d)yJb{'K142 K9ȻOC=(=+,/UusҩՠԬ0s\WT?]bT?]gȻn,C%(%+=(=++(+)uϑ~ԂҲ#Ut̋, T?]AYAI񮛕,Wld)|Nbȇd)yEbeŇd)y)' X,Wjd)y׭IbEbMbCu"K1лbܨ랛:C?uѷ+fd)U7M/4Q.$ Jbgee%źRQYR mArP+_&YAU/+5(5+#(#+Mrиʸ҂Ҳ22C\Y:YAAbfeedŇ^d)m.?+P Yʍ$''f K142]z7YA feedk!K9ȻvC,C,.ڏ f?2&K9hefeedee'e#bbh:P J bit|ΐwhd)ZPZVFPFVXs3]<Г,ŐRSN/`b#bYRj,`VC,`6C3(3)ubsbsbGvŐwrRy7>]AYa]*Y!^, Lwd) K142]_&K1422uy~|2+ԃҳ]@,9+B#(#+,9 eee&2R kb$K1|MbeG]R., 'K9hR qC߿\ORZRWWVX$CArwrRJPJVX@]d)VPVR&K1ڏ ,ʥ9u 9u:ԠԬН, JwrRߊu'K1 d)~OY! 
Y!pFObhe'Ż7C,wC,딓COC>&K1]7~dW@ =.C3(3+,`L딓҃ҳ]7+P J ,JQ)'ǻb&OrwkOׁjPjVX}v;Y!pNrw}0[u^,NwݾȮe*4+Y!Y!=R xU>]: xjrd]I(,PJ Ju;'Ňd) K1M~dW Y׽sRR 1UR,Nb\,J}Kʬ x1uB\uo.[ORՠԬT_]'/'K9\_\YazBNb<:G|ZGv K9Jef&K1GvPg"K1Z!K142]:Y!hOׁVPVR딓CoC,ȮvPvRz!K1}b}]zAAi.}ԂҲ22]w$oOׁJPJVzPzVg[}ΐwrRjPjVXd)vPvR랛:PJϊ/`]?+ p?!;YAujAiYaj3]Qڧ@,`NC=(=+ .+딓V!K1V%K1UYAuIabk]z,uurRZPZV|Mbhe'ŻN9)\uWthe%Ż9$OׁjPjVFPFVvPvR랣ft]bx>]AI۝,P J C?: JwrRUw/4Q&YmsXzeVȺQ Y@^xR 줌UF Y!pMs^z}|Mbˏ he%eR 1td),C;(;)Mbgk!m~rRjPjVXsy}~k>I!tr LJ'Yt^}Uc^x!n,CAbIrwrRy,?]bt*Y!R Y!'YﺾR ՠԬ#b y׍BbG%K1F#K1]ZAY_eud)jPjVFPFV|Iru}J J Wp>]gȻ>Rߧ@5(5+,>u, JwrR>ߧ@,~DyC;(;)uIY!0;YAusLC#(#+>"K9Ȼn]dW@,`UC YAukbkbGv]/C Y!+Y!YAuI;YK]w?g;Y!׾>]AY9 x#x˦8+d)ZPZV&J!K9ȺRP*YTRTIbh,CejdW@=(=+,d)upMC5(5+#(#++(+) 4C3(3+;(;)|Y҃ҳeR R l , 0}ܯORCǻ$ 'K9ȻɮuC }vtIrw+ϓAYYAYIk,P J COvt!:夰'K1z#K9ȻN9) ;Y!Y>R Ћ,Nw]d)z\dW@ = YAU/+,`4C\ubR, ?+ 0 Yʍ9]Oׁ9]OׁXstzeVPVREtȇd)FPFVXɮ{1u ~(uV'K1TRkAIi,`mC,`Ȯv!K9ȻN9) Y!'Yzu9) (,Fd)Y׽sR:"K142b]I(,C_?+ .UpuC } I|Ʋ+W qݯr')\WV~}Xd)vPvRR 1t]zTY d)')^ K1X@d)X@]d)fPfR_VX@+dW@,uC+(+)uIambmAIk,C Jrw]d)C#(#+,O딓҂ҲCouu gOׁXs?]bmthe%Ży?]AYAYAI, ?+yAYAYa} ]zYﺹR 0ȮSN X, xUOׁvPvRV#K1ԂҲ:Cuk'Y!Y:0dW@ Y!'YʍN9)=(=++(+) b:ORY׵Ij(,Did)Y׽sR*J'K1ԃҳP&YAu~1OR YA\,C_, J`Igb"K14P6Yy}ye BrbRR 1tid)zPzVVPVR6CNbȇd)X@d)vPvP$h$ Л,U_VXsH|}9$>IB;(;)* dxC\uQs~ΐwg~"K1&K1ڏ!vb{~$K1]n, LwrR&K1Gvн]'Y, :夰q]QR qՍJrwrRXhd)Xd)vPvPN9)> K1/ʬ#琸>]AYa琸>]ZAYI{Nu x>]biu K1"K1ԃҳGvŐwrRjPjVXR qխBrwrR9OׁXd)y)'fŇd)VPVR&K1UdW@3(3+;(;)u"K1v!K1]+Y!hcteee&Ż9OׁIB AYAIQ~uPGd)]1dk^bQsN]ba^AISNJ J X, Jwd)zȮ9OׁXnd)y)'tt&K1TRruORmBru:7OR5Nbh 9)eAYY(, u!?I!ԂҲ22YUR 1R L^]oʬ x>_]'Utޟ7YA xT>_ 04_]'42RB ]Y~QY!zAYaahQ㪫Y,j'K1TRuAI,NC-(-+3(3)uu*A)YlC,ȮkY YAuAYAAY:p=:Ȋ=R{Nu _"K1Co :}?I!P:Y2RH$PEd)B+(+)u~N]OR1UR LpUCYA\, 6Y!P~dW@ ].v*dBbhe~?IIAYAYatqՕIbS$ bu ,PJ CIrP} ('K142X}9P J LwrRC=(=++(+)u;Y!Y6R Ћ, JwrRX@Ȯ>\Oׁ76 ,P J W]_d)7uIAYAYa3t!:0}꺮OׁX}:Y!pNrw}^Oׁ>/]ק@3(3)ud)XXd)zȮzf%K1ԂҲ22]7;Y!pd)XȮV!K9ȻnmC dW@#(#+,`_d)y)'w!K1v%K9Ȼn7C\uخu zb{]gܭY> OR|rR J%K1QYR:]I(,Fd)UJ C_?+ ptNrP} C_,C_, 
JJc1d)ZPZVȮX@d)uP:Y!PY!?+Uƕpd)FPFV6f*dNbIr}J J͊R #bh__e_YavAYaܨ딓Uw?θ@>t#K9ȻN9)5(5+,9]Oׁ|Arwst̠̤xE:CBbJbFrw]d)OC3(3+,鯟3]?+頻_VX(d)F%K1]7Y!0:Y!0YAuIAYacZAYI\u j@#(#+,`5uþ}}pOXsFi^ܿ;Y~N:u X,P J Wd)y)'fAbErwrR|Mbhed<ǻ:Cu"K1лb٭Y!R R Ѓ,$K1"K142]7Y^}rZAYI<^ORPCd)FPFVvPvP$żiIYR{TS/42RXd)ZPZVd)UYCwC5(5+AbRzb]14~_eeAYae420tȮZ~YabbRSN , xuSN xNC̾ xuttt<֡@ȻN9) <̬͠x)'mC dW y=': xNu, :夰R >R,P J , JwݸȮZPZVzC;(;)uIaQR l0Y!0&Y!R 1]yT)'eee&Ż9I!TRzuZJ-YarAIi~Y+,ȊR,tC,9$W Г,CoC%(%+=(=+\uGvŐw]-d)jPjVFPFVX@md)y)' K142]W'Y!n?+.musHܟq=:C?':CuI)A)Yaσ:R{wu~]#@=(=+3(3)uY>R q=': ?+F!K1the%Żn4C5(5+,`tC> K9ȻN9),`LC3(3)uc7Y0dWnT~xW~1}v+Oׁ>uߧ@,`Ȯ_;Y^,*pvC,~X~1]1]w?,OׁXd)Xd)yNbfgArwݞd)|EbhefRSN>$ -,|~eYR AbhL+]̠̤Wٿ0UR ܈OR?,P J ֝, wNR _&K1sɮFPFVvPvR* ϑ'K1CR ̤͠4.,P JJJ (,~}~eeR Ȯ\uAYau]wNbEbhedee'ŻN9),nC .+Z!K1TR҃ҳRuOׁZPZVFPFVvPvRq)̠̤x'㓥⪻Oƥ|42]w?Q=Y!pKtg|ΐwrRJPJVX@od)&K1TR22]?+ p?.=YAubR x)'n C =dW@,`V딓f#K1Ruρ~'K142C/~co+/42]wOׁ~DY@#(#+,9O[,ŐR 1]x)'gw!K9Ȼ9_OׁXnd)FPFV|ArwrRXd)|, :ԠԬsxzeJ!K9Ⱥ=e/ԂҲ2QY%R Ѓ, JvʬP J@Yd)|MrUU'b3$Pa$ ՠԬkb&K9^_^YiAiYaɸ^AIi,\d)TC+(+)<ަX@d)|Mbhe'ez]ZR ̤͠L,C?'')^AbErw]d)ZPZVX@]x׵Jb[#K142uTR22C/.p?9<P J C?:CuI)A)Ya, :Ѓ, b, xfu랃YtgJrwhd)jPjVz C =&YAucbGv뙥ΐw,d)Xd)Xd)VPVR"K1֏ hed, :0d)Xjd)7:Н, JwsГ, :夔Y!z]ǧ@5(5+#(#+\u:CuIiAiYait|ΐwrR|NbhedArwݞd)ZPZV|Mb,x2ȺGuB=(=++(+)ui_xW%+ eAAC/TR|2+u.<}}ZPZVfPfVvPvR xW Г, JJe"K1Mbhedee'1s}}XtwNhe&3td)jPjVX@id)VPVRW]d)ZPZV|Abhe'ŻLC>&K1X@]ZR 0tmd)mP'Y~dWux}ϱw}ԃҳ3t!:头z2+gu딓҃ҳCo <:NJcjAiYAI, , , JwkY6Y#AYa]R 1d)zl eEr{b?]*A)YAY{O{4?]bzeVhed<T؟3] YAuIAYa?]ZAYI{u }]̤͠x)' KP=G: xu ~ǧ y)'<ʬ x;>]ZAYIuﺵR Л, xǧ yJbfw#K1v'K9ȻN9)\u{:Cs$#wNJAR uBbh |Bfeede,V*drȮXUR,d)jd)FPFVXRn4}ZPZVkB,;|2b넸'), CMb<bϝ|uNJeR '܎a ʅ`Pt ,}= 2R6RQ'Y!PY!PCo#)6CRFTX@d)yIbۏ ["KhR K)Qk,CJwrPX@d)X@[d)yLvT.DBbh^ʼS C@RFTXt1?]gȻ}tKiQ2YhdW@,KQa1R6*,EOfX|~4.eŻ}>?]bYR KiQa]t!R KQabsl]7Y!^?+ ^,e#UR UR KQaR6z)5*\ukl]|)9*RZT|,AI1v8@%(,Bd)8@ (,eu_DbS!K1ЩlTX@d)Xe;C7ǑBU/*, ȮX@d)̠4΅, ȕ, ȍ, ȋ,eJQa#b%l4~_eR.D_J (,eJ#K1UW:Yq)#(U/*RrT|,Ő=RP딃R.D]b"vw]d)ڥKAk, ?+~)=* YFub}2]r)%* 
=~dW@RfP?!ߧ@RjTƥIgg)y׍Ibn,CR$|Hb xC,yuu)+(uPMKQ2]ӖOׁ֥x׵Lb]JʸR 1td)g+ +YFuARнb}l]'Y!!1R 'b3]1ld)yIbLvV!KyPU׽ur)%*{в)BRfP򾋮G KQ(,к뺼uBRZT|FQ'YR #ZRW9*,X@d)Yם9( Y! gg)X{]sY@d):ȮƥR6΅,CJdbyˋ,кGvJ&K14.e\ GR<b^8@,nX{>]gȻN9(RjTX{KYA{oۧ@ ] Y!,e*PY!,Ch3TY@]KQ2]|OׁeCRZTX@+d)y)Ňnd)X@d)֥x)]J/`l] Pu)+(+}z%K14.e\JW׽7u iC,ཋq=uu딃R/Fr꟮.;KȻ{Rt]Jʸ:p=u YFucbGv3l]73Y|)9*,`C\ul]7;Y!0Y~)=*RVP딃.E0R6򮛋,CPy)3(uA[,CJ򠡮{Fǧ@,{t]JʼRu2uAbhLjx7 Lu]nǑbR KQ2b]wГ,ŐR #bm]'P2Y!Ν,ey*R/Gˏ슡%bbel4$K1eb5]R6Z,6CRjTƥ:R4麲uBRZT|EQ} x@RzT֥x׽Tuz)5*RFP딃CgC,}̠x=?uz)5*RFT|,e#6R Ћ,м*dW@\u=Z2X@/d)ԨR6{@RJTڥ>R6,C ,кu)K]>ROׁ}^ZKA{d>]*RCOC =CO:K?= x֧@RFP:PYy)3(u+ba}/GJwjd)|Nbh\wd)oOׁ?ElB)d)YutPEd)"KȺHeLb]JʼЩ*RR'K1 KC#)>9(RZTX xQvv3J;8Pjg)Ur WvIq ~Վ#)䀹_T䨰lg)|AQeɮX@)d)KYAi ]:Y!., xvQgebȮfCRfPj#K1UWY!n BwrPX@d)5딃Z'K1 b>Ru}^K>og)ƥx=/[t\J C?y-}w;KȻyҧ@RjTz&C,`ft,Wsҧ@RfP* , , X?+ ^,e#ﺕR KQal]Y!^, X, X,e#: 8@RFTJ"KȺn|CRZT&J%K~2{栔K)Qң.e]J'7I,K1T/Fe\J'Yr)%*RZTXs+r*9E{vIq~)=*,G9*v)-*>$K٨F;]' {HKYAi髴3Y!R6 GR\JJYF#}C/C,ȮX@Id)y)K&K1|d֑w]d)ʥЍ,кu)E]>:OׁX\P>]bi|ΐw{G_>]ʗ.E\+3], h?+ >]OSJ YFuAk,ŐR KAS {&KȻN9(,wCRZT}*3]'Y!Y~)=*RVPƏ G"K1w;KȻn4CRJTzȮXLd):p=_kju ~@,ZR6{@RZT|IwmV?]*Rg:CO]u딃R/Fe\wd)z C{poǑ2b]wKQ(,Bd)Yם9(RZT|A'YR KQa#bh2td)X@d)X@Zd)Yæq$ŁʥKiQa9<#)>9( ?8CǑbM;QbwvbH1tId)eh'B,4C\ubmq$Pa2R KQaGvPeR K)Qa]t;~2+4/eŻC ]+Y~)=*,v K1C/C ~dW WK)QaR 1t+d)MYz)5*RFP딃CCRZT|,A]]~m@,}OfX?]gȻN9( 2OׁXml]>OׁX{?]bɮJtF%K1T/FG#KȻN9(,`LCRZT̠x׍Eb DbLw,d)zVC,`6f'K1T.DŇd)zȮ[,P^,e#:堰R KiQ{@^4u_Ƨ@RJTK8Nhdv)-*eld]7M8N~dW@RzT&J"KȺn彳CRjTƥT~Ԏ#)Q,Pl|AbR #Z:C, 7C,  ȓ,CPy)3(DbK&K1ebelX@]qFbk'KyGR}v;8 xouBRzT3]~}g~ydVNwrPʥ^Ȯ̠xJb^J CFbNw6}~У]KAi,`,CRJTޛ:C7qu딃R/FBwld)ʥR 9R6򮛓,Pq)#*EwJdW@ 2Y!^,A̞9(,\qս7uu)+(ɬ xѾC{vIȺA)(,Py)3(uvI!T/FC ld]ϣnBRZT̠4_$K1 Xd):Ȯ:5z*=EAb.M頻_TX@Zd)ڥ#bh2td)ԨKQalWY9*RZTX@d),R q=]gȻOׁX@d)X@]d)̠x=/ur)%*RzTXsC3] ߧ@ ;Y!YFuAaR 1HdW@,y딃U7 Y!,e#R KQiҢCO"K1 DbnVf'K1.EI򠤮{n({t\JJCuϝR KQa=}.eŻsY!ܖ:м5R R 3w6Eh,K1TQYq)#(u~I!TPY~)=*RfPN?+ N, x^,eR!K1.ES%KhEFbNbȇd)-z,C, Ȯȏ8sPJ?b7wb<{wQ{y$C?y$иP~dW UrJ.,eJ!K1T.D4C>t'K٨.:?Rw=ΐw]]1tMd)ڥZR6򮫍,P~)=*,v$K1C/CRfPڏ \J CLbh] 
J_eVR qյN򠢮{˧@>$K1.E<_Su랯:CO]u >u'B,딃}bSW9~2ȻnȮʥ0Hd)}0+3]7 Y!0:Y!,e#1R KQac{ʧ yNb$K1"K1.eŻnȮzeCRFP딃U Y~)=*,`UV#K1"K14.e\ GR}6v?8PFd)ڥLFOJAd):"K1.e%3rIqv)-*lTX{_ub8@ dW@RfP* ȕ,PEd)ƥKYAi, OCRZTȮPY!hWu݁TCFbNbh\+,CPPYF<_4Y!,иP+Yʃnqսۧ@>"KȻN9(,ཋn}Эl]|OׁX@d)X@d)y׵Eb M?= xۧ y){#K1нbot!ﺾR 1dW@ = YFJb\J Whd)֥x׍NbEbDvŐw,d)ʥYR R6S CNbAbI򠮮{o@ }4/e%.u ~o@ "OU|eOSJ_$K1 Xd)Yq$Ł*J%K1Q:Y2R6;sPڥKA'&,P~#)Щl4X@d)X@jd):u&CAb(_J H,DYd)-?+ Ή, ș, ȕ,AIAa8@,Of x2{.#)q$Ł֥d>:X@d)}mI!TȮX@Md)X{?{#)j%K1 K1"K1.eŻN9(  h,e#:0td)R6?+ R 1td)yFbNbAb$KȻN9(,/CRzTƏʋ}=?]Rާ:CuA)R3tS@Ȼ}>?]Y! h] w\0?]⪛,м:堰a~֏ W&KȻnC,`-CRFPA8@%jҢ2Q YFugJEd) K1.eźndC>"K1)]1~_eR'K1QY!N,AIAam:Nh\Jb8@ ޖGRdYX{[sW)* ޖGR"K1.eRrTȮ* x\~2{ ޹0td)UZJq)#(ue*R Xd)̠x׽u , , ,e#:0td)$K1ڏ ["K1.e}~rPXg)ƥx)<(;K12, xQu랧t{"bbsR6,PYFuA{nu~)=* =YFu#RУl]7*Y!, ,WXd)y) , ,e#:0d)Xd)y)W"bR KuA{^:C?Gt!K~MʗP Yy)3(uW-K1TPYnJ]KYA) |~GR]J@)d)UǑ*RCwC, Z*-ELC>"K1.e#b9bR6髌;Y!Yu)+(Ӈd)ԨKAY\uybm88PP2Yʃ8ɕ,P~cq$ŁXt;{nGth^ J} xnwb]b=gOf.eŻy==ʧ@ |dOׁ̠x׽wur)%*>$K1.eedv}ZUJbhtY8@Ŕ#j(,Did)UZ CNb C˔#bȺn9#)Mq$Łƥ +Y!Y!. K&KhRR KQaRR6Z ]Y|)9*,Ȯj"KyGRlR 1qGR_J x#)R*)GŇ^d)X{WTYztP~)=*,+ɬwrPX{T?]2] , ,CNwrPX@d)pIw]_d)zȮƥx׍Db{:P0:YFucR3]KYAS ,мy)M]-OׁX{eth] wҲ}ʦ"K٨JELvеbR6PYq)#(uGvT.D]J h,e#VR 1tkd)X@d)֥xPYʃ}FK)Qa1f|tF!Rk:CuMt(_J x00>]{_O򮛉,P~)=*,`ff!K1гbl]яOׁ;:Pzl]_"K1GvŐwrP䨰R qխJwjd)ʥR KYA[,ŐR {]ghugJAId)KA;sP*J&K1C]R eAb]JʼCOC, e+~)=*RVP*CBbBOfX6sX@ndW@ ;Y~)=*RfPz*=E^Jʸ'YF#.EK"bh2td)X@)d)X@d)֥,., (,иq)K]>:OׁʥKiQ2]ie}ct߁:к:0>]b4u x #{`>]bR6{c֧@, C,-딃R.DDvz&KȻCRjTWCRVP딃C7C> Kh>R ɮF!K1УlX]KQaS=O:C?O: xwbh^ ws>KQңl]^,CNwrPXZd)yIqR6̞9(2b] R eNb Cel}Eb^JJ~dW ~(2#).ErGR(_JJ.zGR%~nnq$Łʥwg)֥d.,C?_Ǒb5]1TXsCRJT6C,%+R2.eŻC,5CRZT|AwrPX@d)XsϹC\u>]gȻ'C 3Yq)#(ub^R KQa}l]Y!QȮƥ1Ru jҢ=̟3]ur)%*,-͟KYA{\@,yCRFP딃C/C ~dW@,R6S C7u ^, X,e#:場KiQC2q$dvsC%(,Bd)Yם9(e(,eGvϛyIqRR R6;sP:uC Yu)R ]C?_4Ǒ2X󕙝bk9Ns=8CAQfmi9~2+T/F$K1f"bȻnC\uMt_J X?+딃V&K14.eŻnC,`5CRZT|Nw{/^?] 
XF?(,B)d)#))]C}sd]M:NdCZb]wTJbh\uݙCwC eb)]1X@*d):uC Yu)+(/ uBRZT:g+ ȕ, ȝ,Wl]'.ee2tdW@,4C,LCCDbLbh^*Gם9(RjTC[Xs h ;Nیt*r)%*\u=ZRW))*,KR >R6 G!K1];K1O]1]7Yz)5*,`fCRVPf!K1.EŇd)y=/vbPu)+(3}R/Fn%딃V&K1V%K14/e^JR=7N+}COCEbh/*eSYם9( (,e#}R SttPGd)֥X׽ߜ,PEid):u*CIbEb]J4%Ȯj髴r)%*, 7C, w:CIb"K14.eeɮʥ0td)X@d)MPdW@\u2~_ej'K1.Ee^ʼLcV>N7N2{"aYu)+(ub ʟ+ΐw{W?]b^R 1tod)̠xNbCRFTXjg)y)CDw{C?]bbZ]0Y!0&YFu ?]*R/GCuAaYR 1d)֥xAbn.C ~dW^TOf\R K)Qabth] wrPԨv6őu>zl J#K1LyЊdDeld]Z:R 1+UZ(,e#3$K14/e#b%ңХlY@d)X@d)X@Yd) k#K1.Ee^ d:R 1td)#bhqյDb]J CLJvb7q:!~uB>$K(RTԨp=q[u[:Cu 3Y! YFuR/G7u:=R R6 LbG!K1Уl]7:Yz)5* =dW y)DbJbh^ w\d)zȮXJd)Xd)y)]J X,AM]ykqսoڧ@RzT+jY퓯vb^J0jG9;sP2J&K1P*Y!N,ei*(,P7}ՎZ(,eJQa\R KAi~QaR KQamiST$K1GvJ"KhR KiQa2R6 ]Y!P3 x㶳CRVPCFb^J ,Au~tC:!~ouBRfPޗsu ~@ Y!dW y)럮1{o?]gȻ꟮R KQ'YFuGvF!K14.eŻnTC =Y!0:Y!0&YFuAa#bR KYA{F?]jҢгl4Yd)X\d)V"Z]2Y! Y!*YʃN9( ]v|޻?=Ѽ:Г,Ő/` h8Αu{e)ڥLBQ~dW@ңЩlTJ'K1TMɉ hdCBb\J ȕ, ȝ,ey'Y!P2 (,eQiҢJ#KhR K)Q'Yu)+(EbLvеeg)*"K14.e\ʢl{j(,C:G)}RPY!~n넖)kut%bbelT~_R.D_J ?+j*5E"K1Gv޷ɬZ%K1U:Y!n,e':NEbкнR2.eeEϧut ,м>R KQa#b#eAaQR 1e)%0:Y!?R KQ'YFuAacb3]1], , ,e#ﺕȮԨ0*d)Xjd)y)$K14/eJEe/}c~)=* %ld?=sP*J%K14.eź}dY!jҢ2/eeRT|Ab0R KY3Ň^d):ȮJ, %C, C Y!N,e"K1 . 
CJb(_J ȝ,мy'Y~)=*, /CCNbAQge+,CDv.eŻvC,Ȯ%딃Z!K1Z#K1UdW y)Db_J ,AY]+Yv)-*,7z'K1ó+ ~o@,COfyز+ YR KA[]KQakbR6"]'TP YR -SR"bȺo7j(,0G9c#]'йbR KA, CՔиP&YFEbDvеlVC,6C,vC ~dW^T躹 ]'Z&K1τrtU:Y!n,CEb=\sY@d)Ԩ0t_d)XHdW 0 Y!QR KARQa{: ,к1R ȮX,d)yAbW#jҢV'KȺnOrtP2R KYA[]'.Ee C3TCb4RTߑ,K1T/F:GU/* Yv)-*,}G9~?uB,ܮOfX@d)ɮC ]YFNbP,eJKQa2vC> K1"K1Z"bȻeC Y!:Y!YFuAaR Ȯ, ,CAbG"򢦮{?ۧ@,ho1>]gȻ}l;~2+f"{?ۧ y)g#K1f'K1$KȻn.CRZTXJdW y׭Lb\J CFbh^ wrPX>]zNIqut$ʼn4FbhLvץ~ב'*(,PRR6N,иR#K1T.DŇd)X@d)uz)5*>"K1.e%寒rTX&EGR'Iq{pIqr)%*RzT$ C7q:DRjTX{uBU/*RJT|Fbw]]'|,ŐR 1tMdW@,딃.E:CuAabͳ+u)+(umbm2]]R Ȯh딃:и O{oǧ@ Oׁ̠d-d@,`+ R 1d)yEbиﺕR 1*d)Xjd)XZd)y)eq$Ł:J#K1.eźAi(,Dd)~5T.Dӏ H ٔOfR'K14Lə슡H1td)X@d)X@^d)H q$Ł>8P~ϣ%8@RZTX@Yd)e~ϣʥ0~Iq ,e?Md@,eC YFU/*,ȮX@Od)޿GR5,CNb_J xo y׍DvУbl]7:Y!ȮzC =+YFubbR6[ FbNbh^ʼ{Y>]*RuB eld]w4SޏH LuݙR.DǑ(,e#뺼?+uBՔиyʗH1tdW 5., x?+~2{~)=*\ul,C,+ P+YF88CEb[&22Y@d)MC ~GRJb,CIGR|rPX+ |>,к1R KQaR6{L~K)QagR R6򮛓,P~5uu)+(ujҢ2/e ;K1"K1/Ge] u]y#)T/FeLޯ[YvTP YR MSr"bȺ8@ +Y!΍,Bd)-P~dW@ ]2Yy)Rd6ղC,YnJdW@ |H ǻR"K(JE['bϧR KA) 3CJb_JʺU?GR$K1GvPc#bcb#b 0GRu0 Y!<,Wd)yAb(_J , X!:KQabkl]&Yv)-*{?$lLם9(ң.eź|tP5%ȮyLǑBHeIb~V&J&K٨0t.d)X@d)KYA ~(GRȇ^d)X@]1X@d),CRzTX@MdW uP3Y!ZR KA P~+f*3EIb_J W]_d)UV C ,AE]>GOׁ,1YY>]bgeOfޏ:WV>]b#b+l]:Y?#)4/eź=X,PA)d):J'K1PYFumGR^J@Yd)Yם9( 2CBbNQgibɮ:C,d)ɬ`'K9N]J CA} ȓ, ȋ,CBvbR KQ2.#)Rˮʥ'bge=Q{ԦHKQa#ZY@Kd)eC P.O*,UC,hqսu 슡JMQa}b}l]72P0 Y!0=bȻN9( =Y! 
X,e#ﺵR <8DId)YqZ*(,PR6?M}VVd@eDvEIBb\J xC,8=J |q8GRs"ңvt#뺾?#)йblTXYَbybɮ֥TP ,CNQcR KQaS;Ne:CBb['K1gW@,OCLvУ2]7&Yr)%* =dW@RfP딃f"K1 uurPX޷u ~?]gȻn5C,` C,}i?][,e#: <80dG9;sP J&K1Q:Y2R63]'TQ*Y!Ν,Bd)Y׍#;bq$Ł&:CJb\J ,CE`VȮX@d)ƥL6R K)Qң슡z%K1нb>R KY2躱ڥ0tI!;8 x%X׺H1]1zU+ Of4.eŻN9( Yv)-*RfPτ:Pz 3a]Ⱥnj(,DYd)OfPRR KYA *YR S슡й*Rr%K14/eźntIq Ν, ȓ, (ʋ8Ⓝ7Y#)H Gpr)%* A8@RVP2 xoY!PY!R6* ];Y!n Lbou*CǑb6R KQa=]1] Y!~?@RFP딃 K1/GG!bhQR 1R 1LdW y)Lb]J Wݬd)yIb߇:P*ٕ-uu}Gth/`Mqd]wBbTC eld]ή*J'K1[H йʗP:YiJIdW U., BbՎ#)Z*-G[&K14.eR 1tOdW@ q$ŁX@d) 0Wd)z&CFbNb]J C=|I!WY)*,`LC,`,C\u3]yPHO x6C,y9+딃Oߧ@RzTXd)y=w;K1T/Fe\ʈ x>v[9PRR6]gYR uNbhLCDvTQ2YRR6]?Ǒb/d8@, +y)3(#}йbR 8кR 1 W"b{;K1V#K1/Ge] Jc+ dgSJ!KȺnS|Iqr)%* 2b]5J>8CDv/GeT=Ǒb4R 1tZd)-+ ȝ,CIbˏʋd6?_, xK6]'UǑBU/*,&+ Y!2+ȨTaݞS(V1KXYEƜ oeKDl">J ZS dXQ3o¢`Nh|q+l@̩<[Rĝr*@,O&#<ߐE> PU7s*@,z&Ȗإ[[a[!.(VX~-)VZ6`[9@E7C% &s*@l~34^ |3̙SbޙB 1AG>2+ȩTTaU4tTGYY^|ȬKԠ|"]s*@,:s*0bG! n9@΢[gN~z+㣌[1*9qK@NEdK AllʬKJ¢-)2n Cv#߭{~QeK A,zTإ46`LT؀SbÙS YG! Y>]`OlI! |dV~G!U*s*ά[9 &s*@ltfuTGi2?ʼ69@ge^u̩h_G!bܙS YKDh|q+ l-)j, QeK AbNh~y+NŘS TXIԡOV>2+hQ)̩¬%lI!}v+[aE> P+o[am0Ģ1|QxՍƜ }'L% 6`8s*@l,G!bs1hַ2뭰|Q֥[Y[ak0,gN | cl0Ģm2Ģm1(@gygNE1ĢݙSy DQ̩M*9 (~)u{2T:cNh}u)Ew% ]&s*@bNȨs*0b*[Rb՘B|"6 Tu6-TXt3TXtGl@̩}1Ģ-)GKlÖ bFgNWݘ̩Z,z,TXղ%QƭGK1=s*@,zTGɜJ U~G!bѫ0ʜʃSZe]b}1/]=4>ʸ^uݙS dX6`BΜ 0&s*¢1>z+YyF> Q(VG<?r* )r*@[akB-)Ys*@l3#,cN>]ʙuϷuDlmTG«y~Ytf T3TsTY(D,;s*@,ڍ9 69@u@d(G鷲,T1.. (Vإ.(D[T&s*@QREQGʜʃ%EICNE?M<EN h'ϛ&[Rbϫ-)ɐ&[RbzgN%Po[e ~^ڛlIݘSbƏ(Do ~~qdK A,z6`Ȭ 6`] ux=?l% (~) œ ?2~QvcN(lI!hPY̩cN%f] ފnE 09 6`-9^Ɯ ƟKDlBxeNAܒɃ9 ے-)EќP¬-e#ԨLTGT9@u=2u(e0EgN%P e(|dVЀYȨs*zy+ (Dl@o̩}0hMĢG% 6`,T[[U7G!bѳ0ƜJΜ 0s*@,z.TƜ Q3hoGr9 6ƥDlK⪋MlIA-)^Rœ ТҙS YRrQ&}G/[R2*9@u> (DJgNE{ْB&s*Z}+ ȖXt1˭0ʜ {gN ?3h9Qڭ豘Sb%̓BĢOdK A,z3؀SUg?h+̩V `9 69 mΜʃ,gݞuDрx"2DbN%f] `9 6 Q ߸I<% S č QΜ ФSr(1e##|)bNhAodKDVVX &s*߭k1Ģ3Ģ0hYAl~vuGY2Y9s*@[agC..}Ţ}1Eʲ% 8Q`Y˖֙B,Taׯ..QRSbѽ1Ģ{gNg]5r*@lG-)禿˖ ~^ڻlI! 
Ɯ <$(DQR*^9Qڭ Ƣ1Ģ/ElI!h}u) |(:~NeK A1dPG¬͎"ԩ TŜJ ̺ g-)U*Ɯ Ѐ*QXeKDEʜ s*@l@_̩9Qd,ztT؀1SbbN%ØSbk0Eƥh"(Q g~h(D[Ye )r* F> F˖X/TU|eK A1C4 TbN%P̺;tْBP@Gf-*9@1zl;eK A,s*@J-G!2*9@1zV˖Xt3TXtsT؀c> dzgNE ƜJ|"6`Bīn6TX̩V  09@΢Wg> Ƥp) pi`9qKwBNE?lI!E??%h@A1b"4,T J)GGf f]eK AdNhB(DNřS 4Y4)E6SZTs*Ќ9 G!bѣ2 9@|Ne2Ģ3Y1 ߕK]4E[gNAܒ"ȩ#؀e%U_|dKDEw% jʖGYY?%Fe24s*@e|d6f]̺D`NhQ̩9ZY$ANhP̩:S 4Xt5T؀B|"= Q&9 6`NT؀UxխΜ P(VXrT1[r+,*s*@,ڜ9 6O> f]-2D㣌[1*9 <2c)G!jT&s*@[q(UHTXt̩u*9MU_NeK A,7S bdN%f],eK A,]Y-)јSbdN%PgbNEcNE|Μ oׇ̺D`NE/cN%d3 `ƜJŢ1}v+lwT7TFEʺlI!hRi̩9ΜJ ̺ɑ.(V3YF> Q_̺D,:s*@,z3-)#On̩ϷޑSZTs*߭ޙBϞ̺DQRSK?J6` T؀[VX4T؀c> 6`MTG¢bN%d3TSf]"mƜJŢ3(⻭ Ts*@ta֭-)u(YhRi̩GKqG!bѣ0hQ̩ 3h"6`:s*&*Q:Μ ^Ɯ pJ`9 69 (@ƢݘSbܙS8"r%r(| :߿%ydVТRSyx1Ԡ!+[ReފQ̩¬5"ĢW% ʖ؀ilcLf]eGKi,z*[Rb3b> .zb˖b=ٙSPG!ZP(@1~"[RjPfa> ѠҘS2*9@1/\eK A,zM赘Sb|XaNh|q+,ڌ9@3xa> ѤRSbޘS ,;s*@leE 3눸%(YԠ9,[RT*s*@Fe1(f1dK AM4QdN%P̺WQX eKDVV؀9͏2o%alI(}reN~+ .uaxK%[RjP(DqŜJY,¢14|"6`6aոs-)蹘Sb1؀UBΜ (V8`(Dl-Ty.#/%JřSP"Ƞ| ̺~ȖG2<_!D> S̩, aȖG g> PcVc> QSb˘SbVk̩>BĢ}1hh7TwT}ud(JgN%f]l6=dK A34|"Q0b![Rb-)-(8rYyqKĢhG!o4YKDNC(DJcNAܒblI!BEf]e|"|7dKDE?QlI!Q̩M*9 b(@Eʖdd(bG¬w%[RjPzg> Ѥ2S2(3.|"&QXZ̩EbcP¬۟"h|q+QR0c![RjT9 v:S Xt̩u*Μ Ђ|(@uڐ-)huM gN X?9 s*@,zuTXrT؀^YY⥽ʬKĢ/[R$*l~Ȭ 6`Wu+ CIT9@ulI!C|e* ̺Qr*@ ʪG!T:s*@Fe1ni|"m9 |3dK AlWT 6(:6% ;(mTcNhBi(D;Q<*Q:c> (RbbȖɜ РS2(Ŭϭ3C(DjG!r*9qKO ْBP]llI!Ƞw%EukeY'Aٯ%IřSr(ϗCHνr*@`NhA(@E|Je1Xc> oKeKD1fI![RP1hRqTJ;(@1fْBЀ2QŜJuQkPϬ4?ʼҙS n-g/QΜJ :|"[RPFe> ѠҙS2(s0Y4T+G!xgd%r*9qKcʖ=dK A 8(@uG!jTs*@,zJْB}̺qr*@h̩j, Qxjd(œ Q̩¬\Ԡ垏 T9 Qn9 s*@ 1n*YE|(zJ"cKNt*9EŘSr(| ̺dTuQX,+s*@рSˬK4BT&s*3~7dK A~6YA CfAux/[RblI!ET% b̩¬-)u*9u\G¬_ENh@IYȨLT#s2(D OM*Μ PqlI.U%|eO!u/%F04TTJcN%P̺%T-(8g r(Ŭ[#g =QX/9Ɯ P4!g PG% :Q6xdvEȬQ,(o\HnὛlI!EWْBЀ:QإT˖XrT;(Dl~ 9Ŭ[%.ْBP% RN> P̺KD> Q*Q&^BP3#+~k(DdNhA|ɢ͘Sb^B H CIdPG¬ lI!C| e9G¬k0g](eK A J1.펜 PS.Ѥb̩9gNAZ2u(T.uX~9-)a֍8 % y3QΜ A(@ux"[RPEDȩTTa͸eK A1hQi̩¬ż#Aٯ%(:V% RO> fIYSqT}*[R$¬ۿnGB)(D4G¬x{'[RjT&s*@"/u uayɖG8s*0<~d%Eۏg T9 <,(fݓ9eLm-)9JuyEX:AbNȠQb-)5*Μ P4VNSY̩Yg<֐-)u(ޙB hɛKZEȖљB4s*@QbYJQc> S̩Ygu?(DQpdKD1 
?-)5(#Y'hB|(zƨ-)Ŭ?3u(m0hQ̩Ygɐ-)5(B X# J=(qK lI!(IlI!hA\PG¬|)ʾPeK AFe0(59ȬuM(nQP3Fx9-(Y(f% *Ɯ A|u# ْBvcNh7Ku|dVC|u+]ْBЀRN> A(@ CQY"CIԡ|g%Y8А-)5(hșuG!,^gMnIiqu(>eK A[q(Ӂ)[RP3hAy^"(f^$z~9(PP!g"gWe> QJ<JQ{3)fnInWP (u0ȶXܒB(PVfnI(=}gnIhBi(DрJ;2fR(DJ/G!ZPfc> Њ%~$jTs*@QJ"Q+ {fnI(/gPڳ$e*\|dVр NѾ~ hqnIh7̑$PQ(D%s%E/$㳡[R( (FBd(YBq3sK EKܒBn@ Ѿj%]tܒBP̺ܒBn@K#"h7a3P3bk0CI72D7r(U;f]uZ(DX)2DcPŬ-)G,4sK EJ;(D2n~H$zV:EO}5!X'5Y3៺#X37lX$+aL4iwb/l?bX{5S#s a$K[=**hXg5 kXXp$~Ұjqw؇2˥a=Lj&bX5íÑz`}aML(z`G}bX5NiX5D?IOOt p?G{dZyx8[q8 ۿ6siÓ`FsxM}8g7ْ fp0Ý`=[2<<p$;{ ǥaL-78) {(-X$>{ez9auw}8g_(/fp=[29lp=$͔ÑxRop kfs>ŰI- 8{؇9< `tfEaL-W8F3yqx3[11[N,0 tfհh&Tå4pÑn`tf^̰h=_9<'>î= #eaaGaH+@̖aLnX+4S# ktf^wʰ>h~}iύ =TVa@u؇#,la+@4H~5yRŒ`FBaW>R3KFΗ4 H3wÆ>%Ž34BD)3.2ȰgFΛM#BvVRh$ĖB ! NRѥ82 Bĕœ !<1,RBBęswE<%Z _8X B.q@ Ds;3*BqdbNv Z 9ϼT/ QʴB^Bo꽽aNK aǜQT8Nl"L%`)UŘ3jogu:HRf/ ! z 1g"Ԍ3D)s8R !D49@ DlLa-L# !K "KoAN@Bxˌ;rsF(ecfMaǜQ{/sΨB)S-Q9Q'Jm(e>98gcR[ $D)s@B@2 !bS&rq` vR ̸DS BD9վ@ 8L7O#KsΨ! Fb5pbF8R?W }1g0̜3Ėª ! J@(eb,3(:.F2c(9gԉrX6S9qFh"8sF3.q` "z AB3JK2$%BcΨ\:?ΨOyqF"%V{ JW"ZQ'Koa "Z cqJ@B@l) A DS ŽK\!ik]<MNu\!S(ezQgԉv \N/?ΨW qcFE,2/pFXS$!R$!Dt3Ė( ! N& !DR "ʩ BD9B % e!e vrui$D) ! ʩaQ¨oA( "J vC/Q'6By -%K[Ì:P@ D\"4;rbF(e @h"t;:A D2(eNT[ Uf e3D)+g#,eԱ@ĖB ! N&r)3U')%v&+@Bؤ)ǒ%0Rs,G)Dh v<3\:(eZ Qʴ!j3;9ϫ9x?'t,@)R($'@R$)EX v\%@B@" 3BD)7Tel<Ոv |/oA! QNX 3 cyI[2;m,-ȩ~gԉr?u~ j ! ʩ~fo:Q~qF16)hQ'S7dq,$)q0@ D4Ž9w"8N" g _!ev%DX vUx:]B{ R(eaU*oANu-jD)s8Mʜ$D9ճ@IS BDK,QN "ʩ^!ct'vQy3D)BD9GXX2cΨ+QQ'63OsFh)8"" %D vOΨB!D0;jy Rf8/a9ܛQ9.>9N20&j  ??B ! Jm@/aȩ?T!\Qg3q)sLTQNpa@B@2W QN "%K0QʴHKW[SmBD)Ru}? [9T?wB\^ΨgrQ[0ނ\Q̕3*Ψ绡3 񔹟uL] aǜQB^@Dp;:@ D2@h"8sF!3~ -H@qz -ȩnBD)@B@ DS;r@(WTw'lu9 $D9իT !b "ʩMf~Ǎ3DeQ'JqF(sFua@h)AB@2@(z4"N9c@(WX "J@ 5;¬ ! ʩ BD9A D2"b4)sM% H]( "J1e"ʩ6.DpQ1:Q|~Q'KXo=9sF9N" M aKT B)BQ,G$~ -,:Y'Rf] "ʩn$D9m@1gԞQ'KoANu/ "ʩBœaַ e!]%ZX|PI⼄ v49ի(zUZaG2i BD96@ DS3yO/AAJӀ z."9~ H4PW*G r,n 7Q ~D0K J_AA? 
J.EB sQAA6QƇSDP辈,>AAQ(S =di0"(4`6P媛Fy(RrPM`?PV K6Dv)ڜ 4q+UF4x*N {+(NS)(3^ QƧSDP"YW,E#,EB 2L> "Si(f)zQ,4(KѭAAIY Q(RtDP");di(D0K ,WXDP"O)zdilD0K$yI֏(f):Q(R-"(v+V\"EAAG 4 . FY:DPVLJNrKBa"(.J#9gEDP(F%rκ=Bak'4`ruDP"NsT&di/"(7"(u閳_%AA[񷒳N[L"(v+Vrtqen>DP˻4PSunA\&#R(%EAJ(Ҁ KAlRtD0[E:΅g.ߊKPnDPsY譴[iE _:gE Q(ҀUT.g J%seןG %KAAnR9NnRMPfDP  Q(Ҁ֠CY  i@ Pdu->u[*u [YUFny{GDDP( R&Q(9z|Z>82wQ 򺕥ʔY,E'g]! l,"(s} KUlre{#ҸY*J* (f)z4"(ˑ҉DnKi" Ny(R~嘜u'r)|i~Yu׭2Y KsAA2>݊E/"(+eBaJrٷTK292KkAA[Q(VGmAAmAo|A{2uccP+g4`ruҀ9YnP.sQ( ҉tU02Qu+!R)(f)T"(Rjq֝ܥʼn8'mNnTYoF:fY,E?+#gκ}K3r1[ENŹx(+8!ujDPWQ̏L"(Ϭ{2D1"(%zf]3^* 2L"(Rtq"(ҀZBARF(]JYJGi?P4U"(VG[#yImA\<#ʸQLJƺEIermTquۈGf%(Ju%]&ym$KeA\G sQZ.FJ'Ki[EjDP"ߥGѢ 4BaF%(2G['ҹy* 4`$8NrF 'diAAn>=d)zt"(\o-zA\Y,  4`,"(r #b\X:f)zf9똥0YoEgݾwuR*D0KV%YgDP۷Wy(Ҁ}fxd6κcX! YAepT&ɵ/~i뱜uR:̉ KYVG+:|AA2>4JduDPO5nq)J!lT"(sł^ [e҉ (JuʸQh#by(LBrQ&92[)RI" 9g9)z rs ZAAB rFqKE?6uA\gQƭ"EBAR͈ [EޟCqۉ̳^ʬE di@DP"ߥG", _9lboEg(D0[EJRt"(u ',EF ݊,. x J(r=?u~+Vt=&uҀՈBaI%κeDPީr1KϡAAX#ҸW9YJ QވDYH> ۭ[7"(w"(VG9EJ= "(]IybDPK̺-RBat"(&$ʥX(Rۛ-)$[EP Q(9@PYOeAA^T)2À KVnI!Y~>oܒBݿJ)E#ne*E?7[RH ]&d)РrK v+VrŢ8W>JmD0[YEP;շʪe,AlR|i@u"(ΥQ(RtDP"݌qKED,[R<A)\ZRG;Gf%KF;HO'*,E?=TnI!Y(fiixd6s<= K-.<U%lIFpK Ҁ}2Ȭdi0"(r; nIK)H y(ҀYDY$uRlDPh:-#g= KG ݊uAAnU"(V[Yz+H EO"(u훴Y`(fiu"(r Yg]"E$2m*JdufDPhA٬]ѩ#1 ٢b" ^\uFzxבeu#=Ȳu<k׭{]G߆w٢b"C]xG\EɌ(u}ٕu7Ȭs%3FP ޭt)2tꌠ-*v)ӕcpsf%], 7FP0+?c`lQK,$FPbt\P_PRd3ڂ.EVSo\,FPٟ,WRѩKQ駒dɮs.Q)bQKQ]~6rխ_|M:=AϨSѮ[7iӻ, XJN: m?u+]G[GfeVA)XA]ѩ#β댠l֮SV2tOe0YWFP5*Rdne}02mܢ.eDev(eA1%W:#(ɕ6H>N,?:,C;|Jre0.Q)" -*Oc:NAKtFP:lЭ뜛(ɿh+`dVS)FP&Jc,CJqe02o\e91 ~Aߥ93m\et;U#(`:OFP&͟Re%3'i\)KKg2AwE.E?r*:;ASI2Z+)KTʥZb8VA)#TFP6A!w<:#(Õf:NE]G{e i]n)(-],xhGP:+uJJY@GTƩh׍kTXTRd2Y$: =:#(•i#KV3*S|tݺuz)=*TJ#`#βلA1>Jg[:,`Sf>2lQKLFP6{}OWR7Q*#(wܷ]\R.Ţb2D1FP ι2eTAlRd2$cwPƩ.EB xA|ە XpJ? 
]=A|O)?F[!Jb%eδeWRߗͅ|hW:#(v):`e[Rd.-*v)3*T<:z)ZA٬]ѩЍo.EPuu02p+'g/eFevG"Ch+duFP6wYm,  Xu;^2FP ܼ#2RT+Ƨ2]1FP\\,&#(`Y@O]T: }ZwY^g]G{]֮)]i4> 19x#%*R,*v):`es;Gfe31 GvGRR.E0 #(q*u#]GQbQK?+]W/|%p%3d+]wDRDi4>A1>MJg!/W#(~*]d, H?F[!Rb%'du4+)kTѠѠSJʗC#('\錠lοߥKإЃE0AרKap2tΌe2R\+Ƈv\1FPA|hǕ آb" (Vu0R+pJ?A|fOVq*u\u53FPhJAɗ" G\iA1>IJg, >ЎBQ]ѩ߯~yבe[k5c{W:#(`],Ɏ bOY#Ug̟_,`S :kYe, J]l֮Td=1 Y #(?<^Ay+,OFP6kyt*2HBGf%7FP Je4FP2茠gTh׍ Xm,CJ޿hO;[~Ŏ>٢b22OŻ.J *1bE+_2AГɟ))3 yDew~F+)eBXN X:#(kJ͗"Ch+q*M+#(?t3.OFP&y*& Xsw<2+, (JKaxA)" (eHARt߂b" ?F[O1bZPڥе3dSs.Q)"C;]GQd! }#U~@2u]G`ܢ.eDevm,C[b, Xw` 3YΣSVȲA٬]+#(SƏVy*u#1 Xu2Qɗ" _ʸbl֮TjT#(?qe2Ynmܢ.E0#(i/ Xud:wY^?uq*urzבGRJ .Jb%{W2#(ͻRA)nޕ xDew]w|%p3MvPR(UNV2t2FP=*T/(w)2tꌠ-*v)#*T,ҥUBQ22Ore, ȝ8!Ce0RU+Ư\1FP6OLFP&i JY@MqWRgjAȕJ;#(/]\錠t~]h+dI?: }>WudY@k4~ZJgKk}q]GXb,  XJɮ#(O]L~<:r)APԕJᇢ#(*R/E ~y*u#3 Y8! =W:#(`] =#(2AV=*]tt^Mud@Muv);YΣSѡ#(ObgThyt*:`<2NB}sJ]AJJ.JgC?%m,CJ FP ?(8KAt)2t2FP4AϨS/|% #(`Y@T]錠tm,CJ9cxt3|q;kbJ_+SJ aYew;RyLre09-3 Yn3+<,C4BXnCPh׭]G#(W #(A]gpJ~*uAiwe02x h+dzgV, ?F[![TRd#1x7Jfܢ.E0#(`\R.Ţb" ?F[kJ❎+ =;#(}C]G]G|]Gzuu}lVJ "JeZW#(y*u4QE錠l[=A'2(v)uTk JM"C X^7F|%<4Q*#(AV,], mlQKsces1`Cʐ.OFP%*Rdh+dY@I$٭t)2tɌd+&r1X3ʺի:v)#*T,`0V<2+, XV:E.eFev8zבeδzבeF;2yM)ߗ͵zבKTʥZf, hu.zבeAi~)3*T1b4]錠GTƩ/(]d,Wݺud )(=]JJYUFP6kYg,C[Tڥzb>u0Q"  X#(ֽv#FyבeKڼu^yבKTʥXTRd1pH}媛uC]G{Gfes0[@7|%w~P+)(3ٻ;SBE]Jweݙ6<2]ic9KSf,C},`}uβAЃCΉ(KiQi"W]nl)(3], #(`], wFJ/u Xo :'Y4vsJ],`ݼuβAרKѡ'#(/(w)9*RZTڥZkyt*u;nxdVآb"Wݺ6:v]e:rJ?rJ],FP6kyt*rխ{m#Э1zeY@3FP6k׵ E] m?F#]g,2#(`YUFP2uFP>G׭OwY~yȬ,`ݼv:vG"C]Gߥ]GwXnw: ;QQ" u\u#1 YU0*#(r) =u3QңOEΣSgb>-*Rd릾{ׁ<:z)2,GY0+#(fcܢ.E6F#<AK FPr}cWR<2k3Ȭs1z.1ٻI.QndFP#*T |% > +)Mc>3*T+*_I!,C xDeJ}e1إARPZz)rՕh+`Kf>%*Rd1zeY@1FP6w{Ȫ2c+)eh+dY@-GxUW#(v):A<AK)Q)bQKQ̣뾧vrJz}>uv|zבe^{YgY@[ӻ]ѩc>ʲKefGX&#(v)v1#Unw٢b22NEΣStcܣOEΣS E]=vݺudYG٢b"W1 XnfFP2,GGv4F,C آb22Di뾻rJ~)3*T(@>M xwK(((A\t}(pe2ٻnì2tJ-*v)TAd11ze: F<2Odh+drb>2tΌl2t.e2zeY@GY./EPpJ?)W]錠e2rJY@|, zz)29w+)gTh}s Xn[#SɿߥL X料%:mGXKKuz)}Zb 2QnQi" hk׵}kTAϨSѮh+v)2F#eVAAA6HARd0 
Y^vGRR.E#(`Y@7FP>G}]G[#}eߧ;Y(e:rJ~*uaG\R.E6FP3*T,ˎkTSѮTrTfb8AרK}gThMc>:tgܣOEnwY9E.oC|%wFRR/wFEɌ[Tڥ Q*cy%El|%p3ze].`29߷nQi" h_PRJTʥЩ1zq*E&#(`֑QRe\Arc, "h|%8ҥ? آb"W]Gdm, ((S鲀bKTʥХ3Q]WQxבeAϨSѮTZTڥ UuVȲuudYǎgTh׭W#רK}Za\R.E]ʈ8u«ud#(`z0Q]gV-*Rdh+G~RR.Ţb"C]֮Tdfe(Yu6rJ]d<2NE'F\R/E(zcev]7F,C ]GGb&jt*%*R[g]GQdXȬsJ~*u]AK4F#]ѩ آb l֮TZTڥ~yׁff\R.Ţb"C}M0+#(`Y4F,WlWR8gQ#(&1Q]7[pQإLQ#(g̗"C Gbu;.Q)C#(`397`Ʈs]ܣO%й0YgY@6F,W:2K JI" (?F[![TRdlJ?2tɌGY.,#(/(w):`COFP#*T,ҥUW3eZv]sTjc<2NEm\R.EQUF#]#(`uF, hu=A$ӽ#*T<:7`ݻ,CL#ϨSѮ[_u:,`vwY6FP6k`>%*Rt =ݺwXgFP5*RzTh}eA[TRd뫶]֮(רKǏV3*T<:Fb,C},`FP,`#(y*u3zeY{T*uur)-*Rd1u뻫]GQ"C#de_"zv)CGf~莠KTʥ((OQ#(?~D>ՕmEɌGЩ0.Q)" H(cMN=, MFP3*TzJϗ"Ch+q*C Qdc>2tgT.E#(`JaJ#:r)u:aY:I^_2:=²u>Ȭ,`}2uu ~6Ȳ#u,eF[![TRdVA٬]*QQңOE5FP9*RdASѮTt X0,{M:veF,C[c>=*T<:r)2uFPGX&#(`Y@OGGb9*RdaGP2uJJYwL\u3>k}yבe~u~*u }a?:E.E0#(d,Wwn?: =#(`Y,GXnVFP =u3}s}({׭ QE1FP6{ץ-%*Rd0ٻns QSg, Hq[on@\~e|% H_IdFJ r){J 眃ԨK|h"Ww40BXu(JbpMARdrJY@ilnGa;zeLFP2tM_Pw)AUW QkeevG" (SѮTJTʥjg GXΣSѡ'#(`],veG[Y]]GnQi"C*u$ NU;|E.eFeJ|GAΎGGvGCwFP-*RFTƩh}g3uz)}ecv%FP2FPl֮Td m xFeJOARdASѮTt(&#(`k F^E.eFeR[g]GnQi" jk׭o#UάŻlQKѡ;#(<:]d>2(֮TrTFc,WGXnkwYά fb<2OEnfFP2::rJ?iu آb" {7"}(p2zab+)sT4Q#(!d>SN?F[!Q#(`]%3z?WRѩ)Y~*2إq8g]1֡;QQ22O&#(v)2\p2tΌKTʥre,C&Cgc>5*RzT\u3֡'QnQi" ~󸣭<:z)2tIGyFev]ɌeRAНOY-厠ee2إ̨LUu߃pּȲG#SѮ[4:rJYYaYyׁ֧ͻC#(~)Dk׵}[Tڥp.D\\um0d>M`VȲkGY`f:ec>ʲASѮTd^,WFPuLiu`>AГ(#1zaAKإУ2z?6d}2<2+, ~!#(~*u,` ˼Ȳu2:vG"CSyב-*v)vGE]>u+)Jcew~GAЃ .%CGf((Q*#( , FPuSo:y*CΉV2t.GY+#(/(w)2tnelGyDeҥ4:z):tges%.%G%_.`28_Ieu;WR1ήsn%%K]2_I!, kyt*2ud]ʈ8j媫(Г<c,%F,CfVrJY{בeA٬]:#(z)6A٬]ѩKh+8ΣSחdû,C#ϨSѮ[g]Gn}4:`esQ#zc<2OEQ X֮?F[!K}eە]֮[]GGcܣ/E0#( wY?F[!г2fc>ʲA[TR/7wr24FP6{it*%*RZTڥ Q_I!,CNJ a%1ɮcx+)edGTƩ_PRd3zez0֡'#(, 'F[!רKQRAKsf, ȅ(Si)(-]]A1fA|)-*RtwY@IGYP #(`] ]u3zz)=*T<:Y@͌GYP #(`je?u2##[TRdѠuѠ2w4?:,kyt*rյ آb" N;YQ|wYh+`:+eVrJ#(AKd<2OErJY@όl֮TJTʥн0zq*u2Qң/E֮,`m,C}8]}ѓwD\,; xFevwɻ\R/Gvwɻ\R.Ţb GXΣSѡ#(`z2zy*u33zv)2,l֮TJTʥfe,C}뾟]G X0[F(Q#(&Jc2u_Ug$FP6{iY+)((KQ]ѩQ*#(`19(5_JJ]@ges.D\E.E0Xz)=*Rdh+. 
H(KSce󐡓1.Q)bQKѡ#( GF^, gFJ#:r)ul|dY])%KѡpJy*u==A\dc@; D\E.E.u1c>ʲ2A٬]WB.Q)" ~g#(q*u ]3Qke, X_5:vGҢ.ElߥKإ̨Sc>ʲAZec;=]7:rJYgw, XRkYe>5*R,*v)u5:vϼ:tg>:`evG ec>2tόGza, pJ?,#(`Y@GY0~֮Tdyב-*v)A٬]7 QGeܣOEn4FPr c>y*u.`2zJ?nwYϝ:KQ]ݻ, XGf-*v)u~u`ﺾ|%p%1Q]ѩQ2#(`0zq*u};ιF^JJ? J]..3Qb)(.D\, mu oû,C xDevA#רKcxבe0zaAUQQbRdm2Xb>2FP6kyt*GGv#(r)e6v]1 YpJY@όu$32:L:8 J]JJ]AٜSPr]`>=*T<:zwE],`dFP6k׍}eAdYy*u3hӻ<2NEn6FPie =#({ JORR/E{׍}L{%sJ&Jb3*S0^Iܢ.GJ%.EAГ<, ; F^JJ?"C X, uF<2NГ(רK|, 7F#rPZ:wFPh6Y@+?F,C?p枂ӥEWR8vG/EP#(q*ue2zeY@1 YPQQoF#߷N#yב{Thyt*%*Rt xDevG"Wsё=*RfTh}(.`2Fk׵ X2#(`Yw (ϨSѮk,5F, hf:NEu xDevm,C[b>=*RdA٬]ѩKQ] KTʥ1Q<:Yw,䎠l֮ٻ\R.Ev, f;zal6wF^ =(ϨSѮ(ߏ+G#SYA1nvF<2OE;]G[z(Q$FP6{it*E E]ʈ8:NE.GD1FPӕcwF"C}eTAuJJz2Qԣ֍t#רKQ駢]*oT:rJv)r}_o]֮TJTʥov>3*T}#K FP6kyt*%*Rdc,lGP6k F^ }15wXnSwY;QnQi"Ch+`:NE02#(`Y(GYQA٬]ѩFc, }<:r):tg uJJz2Q]ѩUBp4:8uNlur)y*uټȲul|dY}֮Tthc GyDevݜkTph)_I!叽: F^J%3ٻNSɢFP-*Rt撂RҥQ:#(`e0LFP6W:UF[!K& HpJ]ʈ8h+dreܣ/eFeJARZTڥAWR|Qg\R.E>oudYymu`mܢ.E2#(<:z}м2te3Y FP5*Rtf:NE`V-*RFTƩhYf\R.E`\uVA٬]ѩԨK+wX&#(`z},߼q*u #(~)^A٬]ѩU)ͻCFP><:]d, Xg##1Yn̻, X#=*T<:r)1ASѮ =BļȲYA٬]ѩaudz̻ds2.Q)wՍ}z{%%3ٻnWR8WQ#(.1ٻNS)tFP-*R(ucJ z)=*RfTLY@ʌeTA U<2;:g],W:YlNiy#βu:~*Y^YΕpJY侳Ѓ=A[TRdA٬]ѩFb,`dFPrՍf:NEpJWe]/ud3إSѮ[']GnͦwY,`vFP6k E]ʈ8:NEoӻ^]<2OŻn/`^I\Ei(fﺹOUp.LFP2+)GTƩxit*2:xYN<. Hp2֡C#(`Y@eh+d:'FP6/E3#(.JWRht*%*RZTڥ/ves.wFP2wJpQ" ~e#(,#(v)2t錠l.ߥcʎ-*v)3*T,;h+z)=*T<:fFP-*RFTƩh XneAϨSѮ=ASѮTd-1 YN2]G[ee F^\um0q:ΣS)Q)Ң.E}헒wXΣSWJudYw,;xfȔwYL)yב{Th}Al0V2tόl֮pJ~*u1|,`$F[!U72#(d,C-*v)A٬]ѩ33fYAU-%:A`evGRR/CuBWRѩdQ #(&Jew-ܳefeފvwҢ2e1ve1AϬGskEt+#+Qdh1rlQdVAUgʾ:댠e6AAq֮VZVڣl1^YY]: TZtٲb" uoJ#Gבe (=\d6|? 
زb"C]UAGn FP"(eek1]ѭЛeEpztXnFP2e`A`lYGѡ) ؓ,`/FP3+Vfwx Q #(u+u] زbUOR7$m(:nrI aˊ=ʾ* HpJ4FP ]:#((#+QVV֭tz0nYi 0FPIQt XP;#WKE زb" Y2ˣG[a x~" h,5FP2t댠8kEt+fe{DבwV*vu]D" 87Wfe.ܢuۢ2g: }Y-, 8 X.[߮,<2egeߊv]D"W]ߌec X[tYpn-lYGGgY.[ X0&#(uc1nYi" ʺ:+\!G2Yٷ]ѭA[ٲ3FP媳 X?FWEt+=+QfVhם]G#GYYY]wngtg?=A#(u݊Ap X\u>qFבes8$eG X'#(u{1nYiwx]D)8GU}'){V(ʾo$=A[V( ]~\!Х0Wfg{Wf{VLQ:#(E>C$pJ}YY`Y')kIJ-ҳŲb" 8\?n5)> ] #(y+{jelYGѡ;#(CP#(`YO\`z2l:f\Rede<\uv][VڣXVQd3]ѭAϬ[Ѯk \![VQd0':\u>qGבesuJJ}Y[玮#Ћgs-+Q,+(rՍvݹu䑕(3+V`C#(`ˊ=ʺ1AUwnwtyfe>,~\kYe, 8f;nE:#((uhcY.[d<2oE#(`z3أ揑+u^Eבe2wUudY1r]+xz#({YGPO]G+`O]G| X#(u{0nYi22Eg=AЋ|]fY.[{OR/Q #(.DPM D1FP[+c g?ʼK= ]#((AX>I1 g?eE#(u݊A[ XJz ٲbo[Ie}GP3+V ];#((#+Qdu0ʾ* 8`z2^YYde1 زbo綖?Nv]ʼ!C e=,vFP++VL:AGѡʾ:AЛʺ% h?Fe2أЭ3em2Y" ?F^Wם]Gϝ#G]֮VzVU_ ?:v]D22Eތ8k}?/,ܨ:eegeJ g?ʼ1A1 xee݊vk]u䞕( xgeߊvBY" v]D"WuFP2MFP2mFP揑+䞕(2,|; vtf>x߀wtXI, ʾC/FP#+QfV歘,UF[VڣXVQVV֭LY{:em3Yd^BY22E-C e=,/FPr꺈nE>]Gikםcg#GɴEבesn8kםk#Gѡ#(`Y֮VdL٢2MFP揑+dY#(`YeAq֮,`nFPc X.[iYi"C X*#(u݊ #(`A[Ѯ[xz1]ѭ6#(`ˊ= #WGב{V̬[Ѯ;]G'GבGVƣ[Ѯֶ]G֡'#((;+Vjy7;:O ^YY.?w2Di-+(;+V}"2AOQ&#(ΦCoFP2t\!Gst"(`MR$E,lFPc xjc<2EK=JJ{NFP;+[!̮ҌG{VUwN"CinYi" 8.X&#(U^e@^AzgŹU7*#W2茠eAq֮,`,FP2BYٷ2d߯ <Y"CdY#(( uDבWV֭hEt+=+Qd,`FP"(YAq֮\u/2ڈ#[VQthcc=XtYp,,CCEׁ"Y9_ G]G^YY],ܲŲboEn-FP:f, 8ik e= ;#(`Y8kEt+rmc7')8N[FPC xb8Gm?v}"e=eeAq^5)>JB X.eEP#(OGɮ Y"CGᓯqG],W9}"X*C Xp~1usx}+Mn,<Ğ|e6X&#({z1 X #WCpb}"X|Y@7FPb,C#W2(8OY[VڣU7#(`ztFP"YXec3]g+䖕(kWV*꺈nE>Wtyfe>,`:vݹ{Xu䑕(++V,`FeȖ{zNFP,_ +<2oE< _udz5FP#+Qd uk0Y" X X&#(uk3+dzWFPvc, ؝eEރUʣGnOFP"(#+Qt'v~BRA(3u珟I !1,FP_t]'D)eAe0dA]6NX>gm~"" ?Fe2^YY2dAjg, ,nFP&,UFP/)(-+Q,+(Aq^6AU׌ʼK=JJ}]dC/FPq]]ѭG|O=roE.[#Aq֮VZVڣЋʺG>AU7~\!ߝLEבe"<]7#((++Vf, #WȖ{ #(uAl1Y]ѭG珑+䕕u+u_/ܳefe>,v\媛R+H,+GP~:ʾ~:rJy+u]U/udz0YnE.[ѡ#(`z3Yٷ]ѭ +#(㗔{Y-+(28[IG| xfeފvwKtY^GVƣ6#(u4>%A(e0]ѭ Qe2q;u!nlFP2OR[)&#(`YgVTLFP-+QFVƣ[i:b,CأyŹפ(#+Qd3Wם{#G }Jʺή Y2EiwVDU?wv]xz0t Aʏ+䝕}+&Gx+²AqVd2A?x, 8ήc X?FgVjay֤(2tmeAq^:Ad,W]݌wVlB xfe" X>q~BXp AzDׁ"(sxeVxgeߊv9%: #(`#(헔{Y@eA#erJy+u߯#У2Y" 
gnEeA[ѮVFVƣ6#(u݊,#WȖ{}+$eGXg<2UYۢȲ&AAڢr}tkEt+=+QdN]G|xYIɢ#+QfVhEt+r}/w#Н=Aq֮,`BWaYnUFP5+Qd1n FP1 زboE.[Y X8k׭ X?F-+(++V"vc,CsC:Ȳ']GYnEajFב0 (e1]ѭP+Sst]7~BX.eeeeJtFC/FP:f, ?F,ܾMv]21l2t][VڣXVQdc xЭ0 X&#(KAm3,WFP,FP=+QfVd%]~hGVƣЛKJ=,:eegeߊvݹZu䞕(3+V"2WfWV֭h}o ]GY22E0Aq֮A[_R(1r,WFP;+V,#((8kם{]GnYi22E`g:[ec Y0 #(ʾU: }nvtyee݊vᎮ#Gѡ#(`zF,l2+, 8w;<2oEwtf>,`FP++Vve,C]GأvgYnFP:1Y]'#(`vto!-Jc96.2AOQ6#(uAI *Je<2eeeʖKcܲEP,LFPOR\ѭG|w2Aq.6#W2w2I aˊ=,ەGPkIJ-"WORߟ?I~IiGY" <֡'#(]n+dY@k-+(;+V ݌ &#(`Ywog X+dWFP++V`,W]eQBv]D"  =#(u݊,`#(`YMFeh[|\zDבeQ#(y+u3Q-+Qd7JtY=v}:rJzFPUAq֮^ X9JtyeeJ X#(`Y1r,Cvݮ{VvgYnFP26FP=AbY.[?=I ᙕ([st#((Ks2ѭGأlU2{~I g?ʼC/FP-+QFVƣGkIJ-"COR[VQd\ujRZ}(:bŹYe }$ʾQ2ʣje,W]8 }$pJ{(:Aq%)<Aڏ+dY@+8kEt+2te^k׵ e==AlUuEבe֠EבejEׁΡEבkV꣌G%hu`nE,`B]֮VFVƣFcY.[n,FP:fnE.[Xa Yv]D" 0c<2E>wU-]g,6#(`ˊ=ʾc Y,{,g9AG׏+dY*|ܯV~4ztY>7^=, ~gsWգ#+Qtvѣ-+QdGבesգuJJy+uA{23OR[VzVLQ:#(-1]ѭGѡ'#(S^(eE.?F ]:#(`Y@1FPAqM~Be=xY@-|,[cy~BX>w$,,ْGP292t+8WY9k2, h\uA:?IdAG#W]pJ{Y9H2nE7FP2t-+(n8k Xތec x2ҲE =#(K0#((3+V"z2Y X|lWEt+2audY02, 8w]֮VdaudY#8kם{#ιߢ2\!+]nAG`Y.[ xde<ʺA,`FPUAq֮[ :#(`Y8k-cܲE^ec Xn7FP=+Qd=Aq֮VdAG܃XtyucЌ#G[?Mv]2]ѭGLFP릟'.R~\![VQ(:GP=+QfVt')e߫AGѡCOFP XP\![16FP:Adyb,C#WȖ{u+K #(`UFP2tem3e\u}01|bG?I!, 8'Ů أ u<+, 8GWfeAq֮VZVڣ1rnE:#((2 FP3FP]G1rnefܲEs[udVtX.[Y" Xg5AG,c, XgAGY ،8k +#(`z7FP>ʬF" 8]G)wtygeJc+GA-=>IuJ0Yn%οZ=>IܳDi(:$$Exu+CA6#(`B`JR<,LFPrՕ%eEP6#((c xе3YbYGTcyפ(#+Qdu2']w Xld,C\JRJyY@1r,CO]֮η#d, ʺٲ-+Q,+(Li:vw_tY6c, v]D"Cf,C,{gnEAг3]7pJ}(:bYnnFPc Y #(`VgYn#((A\])Dב[VڣXVQdR\k}W{V߯DבwVh}WeJtyee݊v^e{3[ڣO ޢ4FPAwQ#(y+ug$Epe3أ[Vdc Y.,{) (xtFP/)(2A1֡'#([^u,rX?IѭPGAGAq.r}G`$E =ORϬG#W&GY"Cs e=\um0֡. 8Wf{VVu #((Aq֮pJˊ=,#(u}1֡7#(u+urJ{zTFP;+VvI." 
\uA]]RZtY^GVƣ_Xk}T]GύW#΍W+8kEt+2-FPsk#W-+QdYAެEׁfc,  XgA׬G XsԢuAk1 xgeߊv1r,C xfeފv]D"Wn-+(Aq֮ۃ,`oFP3+S$c?I!\E錠GVƣ,Q#(]wEDYM xgeJm ?I!<2eAqns:aI aB^YYe3#((;+V,.FP#+Qdc JR<,FPs:auFP #(`Y@3FP3+V\um3+dˊ=,FP,GP2togVzgqu9k:\u#ЛgnEp#, 8G]GGgY1:,`#(`Y8k׍ X#WȲsxeVXp#]ѭ6Afe":ʺ" 8i , 8d#lud-\KRjyYCEב~5c]֮VZVڣ#(`]f9οl$p36Qe2]ѭGY]M e= }$ ]#(SP:#(`Y@eb8/LFP5+Qde1^lFP ]\!е0 XP;#(')VdsOR]&^ .{ɮ `2')wVTY{`Y@3FP2yrOR7B Xp],W9u3 X7#(}+u݊ =#WȲ1Aq֮VdA[VQdc3]g?F媳 Xv FP3FPeh[8kEt+ی-+(r#Ws]GY" 87+]ѭGmʊ#]w:AЫ0roE:\uk0^YY]ѭÊ#+dzWFPvc, ؛ʼ:M xde<1]ѭtQ:#()do%οeAХ0rIٿGclLFP2tY|,[fϬ[)2929²cWfedu\KRjyLI anFP;+VZMJ"C#W2tk8wY9n2+, 8ͮ أm2%f>1^YY]w~* A[(ʾ&C?$x)JcŹGᓟnYiC#(`Y@8QAЛʾ#WȲZAu0,.FPgMʬ22En+%C X#(`Y@댠wVl FPfgV*s-+QFVƣ[Ѯ;]ud: :wtygeߊv]D"C[]Ghvt`6_ #Wr՝ߘ: =*#(uNfGבec2v]DҲE`+dYUFP1Y"C[goE.[ѡ#(`z2dAfe YAs0erJYjX: `_tY=^YY]/ܳŲb" Aq֮VdAAq֮Vd{3ڿcI -Je9^V(ʼH #(!dDY8AGأʏ+)CY$xYp8/Y@ٌ[(3rlYG`y(=+Qdj|,OR׬GY"Ww^$Ep')[Vڣ#GP;+V ݌ h²^npJ{FPQ֮zAGY]7#(`YGVƣГgnE0#(`Yw X*u`:+ehgVhYc,C[glYGbY6#(`fc yfe>,`.FP>W}=Y"CUusU-+QddZ;+VFבeߣpgVhEt+:f,C#WȲAq֮VdsudzOFPAq֮VFVƣ \$Ep2럺 RA(AqV(e3dc Y.~BxA`9N[1dUiuJ\!o?I!n_x, 8Ʈ ^I aNFP,L_ Y" [$Epۏ+dFP#+Qdm0ef{VXVQdm3Ac[Ѯ;,,CEבeAv]D"W9YtY>+kםEבef-+(:bYnB xfeފv쌠e9A1v]D"C#W2*wVhם?[tyde< #(K:#(`Yڌۅ+䝕}+u2 X;#(ϫ"ivFבe8Y啕u+u݊A6#( 7u]w xde<1~#WM زb"Cs/#(J-\!OQ*#(CpJ{Y@팠eAq6NFP:b, h+dY@댠8ϚYede<, FP,#((:boef, 蕑+dwFP>^uJ.v]pJ}YyVu++V }'.v]pJY茠wVhEt+:`, 82+]ѭUwN+lYGtEׁ2Xg,C`Y;WtY`,6#(u+us1r, X+dYevݮeAϬ[Ѯۋ 7#(࿡1{낗(:GP] xfe49ilv]2Ic(M XpyRu\eVAZg, hKJ=JJ{ˊ=,%)< 7#(`zB Xae1Aq6uAʾ)C[gܳefe%eE>Dmv],ʺ:[{V,`FPcWѭг1CsLc, ?DP{z1]ѭf, X+`nE^,`#(`Yڌ8krJz7FP=Aq֮VdA[_R( 8Ox}JeeRAqWf5.Jg<2o%olj:Q{UA^\I6Q+B2JPDEOETTG䥈 YRElE P"*n (Sue)BkRENeu+BKݒ"*`uk@+)B_}EV=)B?5ETnίY׫"*v*XzSD쳮wETVx"(b+z1ր"-ه}u(B2ފϺDPTG ي^Kp̺Y.RQ!wS"*JJc0BnET#}ɚuǬ8sdͺ`+:7ETVt̷2y(B'"2>5LETP"*dk@MZQ/+VETրQ![jWDlEKf/uրQ![ih=k]݆"*d+-ETր=fЇ"*z*X})B XFQDlGݨ̷RCGK%f0"*u*C5`&EPրYQC"(v*XVWDnR4ԏ2LשnYѬ n> %EPqˬGoRQ!wS"*d+: ETnv~:#*z*Xj=CIGsRDN}k~+Y5Pr(V~P5낭]ul5 ETŎA[ѥ(BKSD\+BKq?QQt(-}k@ǩҭQ!{C5.ETրA!kYݚ"*dk@늨m(Y[RDl IϺQ![zWDlE يI5ٽŬ[ٽŬ[b֑}-f72l 
Iub֑}EVQ![fWDl SϺQ!Si"(du++B.R>"*y*XNӅu #pΧ?J3)BLETϬKs%s1%+B?2(Pf(Vtǩ)B.R>=Q![TDz*xK:VR>?5`_LJ g+zvJ lEK\I\N|~*X%).V~ǕTGz.^RRN|/)B.y"*d/z)BkREl EϺJ;QǬ#XjWDl hOENeum(B yx+>"z+Vt"(bk@_#)yf]DoŊWF:8QYGY_ ub+z1T[րqYck,]="*`us)BK8V|֭ ي^E5`5ETրQ.EPV~-)-րQ!PrV}G+)<Q![TDl Km%EJxV>uk%q?Q>5NvVRSiP"*l /f]p9Q}vJ c+vET?/eв"*d+-ET:z}k@A[zRDlG0H+)ݛ"*d+wETTG3fgHQQ![FSDntETvͤ؊E="*`u)B̮ يKϺA[VSDl X]E!B^ފϺ1Q!Sp!ED>]b։˩b O+Y7"*z*XtŬSYogꊨ۩2Oef ?h@Dl XKr?Q֩"VRSe2QǬ_bE\L))] %'EP1~؊U"*eJWDRElE yt%-i?˕5>!BԬ؊υI+)5~ J(րQPQA["*y*+gEPVtNejGQ~J c+OETVA!7kȊc)B؊&րYQ![wx]Ϻ9Q!S0"*dkʊ}EVԏb X]5` ET>TDN}ČnmyV)Ur?Q֩\5k(RElE6VRg+z.82Q!P2kOS"*bE碈 "*~*SYoZ^RDl ("(f (S]"*~*XjRܭ5+BVETT[Vtkb 'ݬYlE yCXTDO;RDQq)"5[NlEeY'Ĭ[`u,Vb։oJ:.EPրQ[]ӀӂVRSo%f]XaD\L))]2e(YprJ z* S"*u+:eEPVt*+ր4Q![9+"sQDl '[f5`\f]p;Q<QPQy*B䥈 ي.ERD\O~q*[IWKAS)O[j=쨻/&0R?.EPT[րQ![-+B{RYG.րVQ![ٽƬQ5f؊IrMRG З"*`u#+".R>5`ETրDP>"z+V)" gZ;NZIa g]DoŊGqb։b X]Ϻ"*z*SpQ?.)C~v*LS"*g֡)Mr7+B^LET͊NIq5%+BETϬx>PJ c+,k%5 EPT[PF(VtQ![TD<%+"KUDlE xYJWD\N|;PDlEͲVRm%E{TQ![k[&DTɊf0PDl O+vԍ؊E}.VRSYog芨DPVJ5j<"*dkjNef !B~G aJQD5ZIa\N|fJWDJ(րQY="*d+z6ETրQf~Pb։/J:ߐudufY'~q^.uTGtET1Ŕ Q!SYoC)XVRS"*u9FPVtΊ\Q!SoYPD\Mǩb (Ir P"*d+VEPÊC5NETրVA[ѭ+Pf(ր6Q![ yQד"*d+gETV}4k%5`\lyߝLj يoߴx(ր`uv*xCpܗ6Q![)Ƭ[fVDNeu(B"*`ueY'N4d-Ӕ 8fĈ J(TGYLET1 bU.؊NE'j%Es,ξ˩CI"*eRO~k@u((Vti۩b (]m%^ 0BkWEl 9 pH 9Q׆"*d+zx|+:|}k~W4bEVRt(%}kHEp!0GWDl Z:5`EP>RDlEi@mexQǬ&VR7S"*q*Ĭx94K(9)"PDlE祈 8fـJ(VtɊ]"*|*SieQVR#*d+z_6J ckl5낭)NVZIaN}kPQ쨻؊nI?5`\ǩ2CGGSDl K5`VEP0"*d+zvETVRnUETVQy-@@t+Ŕ Q!SYo%fTƈ 2Q!So%[ VR[ѩ+Bn ETӔ 8f؊S5TEPրQWk@2Q![5+BnրZQ![PDlEQT[ր='o5 َQ+ ETVZIaN}y*L+ETVtob ]n\Q![FVDu̱b։Y'NlEk+f9YVVEl XEYGY_\2J*)C4e)Y7)VRWS"*nJSDNejEK2ZIalE yQ7k@YK(5+"?]"*n M5vETT[Vts[f۩bG]]g:>5]*B xCYXm*B2L)ͺ}3#(r*Sp|DTp r=QP"g(FDl S}݋VRk@RElGGJ c+eETՊn]5-ETր^A!7+wETV(۩b O.bEϮ يC5`UEP>PD\gt+Ӕ e6>rDTŔ 2Q!PRR2J5%+B2\yhψ يߛ0A[rQDҡQRUJ ~*XjQnapi%VR[u*.סX7E+)>Vt57ETրQ!So]Үb Y5`TETT[V芨YA[PEƌG ~f] E+))]0e(~f]4 0댋)Ur?Q4Q?dDT5A[ѥ)rFTրکYck}4*NV5J ck~ISxˬq?QUVRg+eETVtA[K[f5wETTG?5`$EP0"*z*Xѣ+nסXEYgl K="(nEϡ J(ր9Q!Q"(du(BWSD4#c%J 
bJUDMi)Cp:h "(b+:WET˔fEDL E+)}qh%56ETTGRDA[ZWDl hKp'EPV~TG ZPDlE؊K5`Oe6YPDlGݾʧVu((OFOePD0xRQǬ˘ZIalECZIaB)U4e(YqJ+)Q![m*٬-lKE+)A!'k~xˬ5`j5ͺ`;oq0^J̤ 4ͺ`kz"(bE )>0n ETӔ 8f}*ZIa\MY{(9)"^4ET1 ަh%]"*d+4ET1 ΨM.О؊nYݪ"*a h]5-ET#^A!PQQ![TDl CTDlEoh5`]++r يoi%ܝVY)]r?Q)Sp̺Ϯf]p %5EPVtZu((9}^eָ'A׬ LET5JMŊI5ETրQ7+FFJ c+z_J ck@u((vKݓ"*dk@/5WETրQ![}(Vt)"̩؊^EWUDl YjsaiJ xEoQ!S"*uJz"(~QӔ 8f~RQ![eVR[JWD\QVR[R\ӡQ5)BԬ yz+͊U5]*vԵ J(Vt-Ь{xXA[[VZIalE0"*i CuRElEϤ xCYXfUDl S="*7׬ۣxƬ? n%x2QǬE+)K({i%q3e*B,ET1밺h%]"(b+,ET+=,f1p"*d+EPVt5-ETրASez+͊E5`uT[Vt襈 0"(bkhuc(BgUEl C5`%EP_^h%4e(~.)Ur7e(B^̾RMG(]VRPQ}IF+)UҬ  8[5+"[REl bYck@c ZIalE0"*zJ>5` ETVXA!7k^eOK.xx+݊Kr>QҬ 2ʰPD/)k%q%-EPT[Y7qvJ J)Cp:~viS"*d+ETVtplۭU+)uJdEgր>Q![$ASo%[ѣ*B.R>5שR1Q!["U+)ǩR*"~7JVRwS"*}FQ!WS"*aPDZIaM[(%+"_ЪÊY56ETր:Q!PZR4e(rs[f+fQ!Se_j%EpsWD\MG(+BV>j%]"(bkxˬ5vET1᪕VղVt++ր6Q![m*B{Qܬ*BWM.؎Q![c*րYA[fSDl SWQE\N|kk*V9ZIaU0ET˔ 8f]T n>4e)rsYG"".r׬ RDlE7ZI2{GE50e(סXѵ*"].x|+ŊރK+)ԏb hM5EP5JŊS5`\u((րQ!SŊCp Uuͺ`+zNETÊ^UӀ:o5n ET3TA!Ǭx!5rwS"*u*,+^AS"*7uĆf]p1(B?5`R 2Ь L+)QǬ8Ei%q6(Bnx"(uGVRPwS"*ePD\A[0 YZUDlE'"y5- [FVE\M1Q![x v5<*B̩ ي^ETDl XKӀ_ κJA!Ǭ[U+)k(5`tgq?Q3j%EseETTG{8f]ƻ*0.Kq 4e(Pz(Vt{(*" 9f]ƋW0PDl S0"*~*SYoeYfQEl CSt7[IѰ¼i%q6e)BnܗAPJV70R?J2*V)BJ v*XR\(". ETVA!Wk,+"PDܬ5Q![RD4Y#iJUDVRS"*JW()BYM+)ݴxンuUg]=؊^UӀ&[y8f "(aJQDf[Iڦ9;4[(퉠3Ɋߦ40.TETݔ y2"(lEj%Dt+#4A!ǬJ J)] UEP1p >"(b+ztEPV`8fݾjJ(T[Y+M+))Kr%WEPVtn"*jRDf]s-AWS"*="*d+z%EPOs$J <r:jZIa\CKq%EP+A!Ǭ+*.)ox.EP1p{7"* uݔ yQyج\C.C.؊gTYsJ }'"(fJUDj%S4ݴx 9fρ.Q!PWYVs(i("nx2"(u¸Q!? YB!Yp̺zZIaN}aRDn*C.XEK1fq7e*BK#yƬ ۦ*zM+))M3~K19댛)C ^Y7qVRS"*|y59댗)C3&մ¸"(&g]3f/VRPZUEMg()B~fTJp LEP#q)"^TETϬU3fq e=St8ZI̺i%q e_Au=A?EwLk~f3xQ?nbIJ  \Wexf[I1y.J hPڴxVRPA!Ǭ譴PӔ ev.[fK(*"ZIaBUr̺0Vօwx 0낟Y0.Ԧ2Q!PfQ̺[VR[k*BVV»[fW(+B~f.VRPAPrW<`exEu=AP֥ even%R"( A1낟Y|%θ"(CGy-?o1<_k&Nlwu??\־`6'߸ӹ#>'+J?皟daopO${j_)sV<)ާ?ZY<%-)_[3S3a o*/W o cԃjW;w6lj~ޗy}[qS2YXZic_i:sƾ"_x9mJsK[~x'"?+7kqk}}^-׻}lk_k?Wf<|r·K +~?ƫm(քð3xBbe_ Q~_,=%^Xן }'x`=z%\]Ǒ>ݏoy:'Wn o;>uts [|P ˆ&<yF]z[7絊6-|_cY~kp7|Qs%'NF(4WCO =?:o!"<ϝT?a>7}v:`{%>L}?_3/{y_?oO/??jg.? 
Һ_w?M~쫆?#_?/ǿoݯ$D endstream endobj 93 0 obj <> stream xڭX͎6 )%J`E[Î=SP(/yݶYIG/?X$`!~/|};"E$-'#@D;ϫr҈>W/-_Wa)yJe,tue$[}9)mϒo˯/ i4yk.$'M-L&\t-_J[,seMRgA`P(U-,ݥw*ǽ]@%Ef}1QR`/[&|Lg5BnN5IMvc|V4=C =~Nn Ʊ|`Wt_BTB[ܒP%-g X;t81o:,}~YTNnD`EBIP\>HJbe T} SZJ .n9O)#Ýs+Uj "۴ONN\֨b0lI m>TY`i$,Y)6}Ul >ݨ ,7.k<g6 `YpWkcB59*Fi ZT0Xt3 Ҷf0& 3Q u+>w^HAwNsWU iY3s iij)@y;d=nv撡(|̧Y&stJdȉlx+4'b[W4ywC`s67DPPz[֢䴲H)_JV6RH_4(tY`Y-^W,a.9܋Ɉfh<mi4Qu|L9O%S8bJI.SvƴM% Q;DxHTᕡ<Ȩ {%YNJބt'\7/zW%~R64"5`! TZ=Jd'AД&[sw:) ^3 #jn gR>kosf+0Loyw~d@q=t'g!HbVFv$Y~|B< Pw{Ƴsh &8'|k׼d:dشѭSюѱä1bj3֭ HT4A^idr_Lr~˃|d$,Y3 I+%?O8}?ƍeu}"5{2QvLqԯ_;{ç,;D8"G P* h֭H:S O!Em};6ա $T<'aV9H@ C*> ! Sx+ʄP㯆?v0:f:j #{ף%SjS2u|BcH$NpJ,gSǷ i"|JѣI]=+aP-ޜ̵xJS8 /DiA1}bdB'< \X<@83ME pe!yXC|JkןYLA endstream endobj 98 0 obj <> stream xڽZ6SJ-jPrK \Ce^?Dmj vEQE^/H+'^ݽc"ǺX-| Z>>Jʞǃ 8YuŻr:(mr& ޯxx4՗'A8s"{ I rPB3O]Z?i =7nH9i \y 6օ%@4*K#oKH7Lcm"~y=@ypĜN];I@9zi׶h*QC~qLYԤ4of/WVA1F=3(z^grPB"YBt=f׏Z]i+/qsPƳh6>|]YQ$eff008hX~IKIʖ$F3mˮƷ_k0i-3 Y"`m٠GIMp]Ga"P`@h/ӹS qr'Qp+٬d!t#oDNXDw-׸E5.n%*ĭ*N(2nEY&p3/B3bevk&4=͎meVUo=>I6h :*egH*zyA"hcgXK/7I/? "ֽ='( -+xj+j>YpjcD`k*1F-MqT>xiW^k&'4Y*vNnPXͤPVc&TBZ2 >P䀹y!Uh2fl'/'!/ǞD4*mrY7t|(dY)0㓍# F;FQIV'(> >Lب%!TM `,\Z>UT vǧ،1hB2%(nѠ_jV֢#STC3z5 ZML%g-L9D4 %[BFS+S0MpVn4CW9bڪ!% (S>xgf9GXMMB9[Z\1. q!l]9Z/_#r dGG[m00^Ң uKnb_@U}"TB׺sVS$qu-BNmauQ▼XD;'q7:X{>ϕR)J!Ő1%Gd Mwdw`1P gF^lhRiZfm>ps1TB+cWVDϷK*c3Do8/C.2noD\?= (t :_uѶP*+Q=-+Iؓƣ$ocҩBu0u$T$i>I=}?d<~TcE:VezL_+PM3ب[?P)繺P ;4Dlsbd, cnMKg;D%QW̸/XeեnCOEw_R6.yavvdk֏=(N5<Ĺ̒gV4+˭3 .(J)Hd xr yL@VЦ.Rxf,nn;gdrF! 
_I8TpܞDE PU-!v />hUN(TM\}_ERZ/}S^KkOZqBN怠 㜦 *kۃe?}?ܪ0dڷ2prw95`hjf56<•f+d *9J3=ƠPiu;BJLчz!}\-yWF5A(*4< ڟ|!*O⃬r$;y[, endstream endobj 104 0 obj <> stream xڥZɎ)p_ÇMҷ \2?yTdQ,SY ˏ,̒,;|v\7.f|,TJ.6I}1ar'>o>L:_ Ox?I'w.6V3S Vu(l,Q$g2Ϡy7Fb}[U5zHxbC6@ 1;)'fRT6xq@.V| 0W>QY7 sF>Wd_C!.(nr#\# 溬F#XSԝꍙEf#/+c<̓܆ǐ*hv QFb-|:^ٓ1D'(s֏ٽmv.n<|6`pk͈N-S :#8NCڏ }2!mz ay>`c\_1>Dt{Xe%iwC)뀴JU׻U,靍6Fe6A`7ADyH$ YlU#K z4bjmTEǪooud 4`>ޓ|󣱿ەO_nv1`l'nhp$\߹d9h=J;{xrqEF #C^8{R-4C*ZQ** =!=I.߉c0 ~ñb6e +2>J%pk!#A]CRYQ}w rj _ėeV;]3as2|=KЭpfhե63 jCӗQ!RpYR$gbrF{̳KcVv! @&ݗNj'Hԇ+Eʃs#[ M#ZypZpHwiLsUwwQj8fa8:}dM`N V-Dy42FPdQ]u["뾸H!tQjsF)EU:{2ШejM0F;V$IiheD2EK<޳zӘZbKLsDju>ms$i$ds0Bۄ]Ewʑ;]*v2Y~=Իo{bvTPqP W7-Zjw8d-Km4)rƌa~lOaڬ 4֣W%8 =J Š[PYam(Yc ahlm[X"2qw]\R`9KS8L0 h&udO%]AJiago.~ `ɛm55oya>5vo0.)€dW*fz[tJ2ktIc\ܞx{X̅#DÙU);ڊt>S::/pg!]Lx z)*qw1#ެb if$4ڢiuG}8 PǏ$]/u̢ao- z֠E,S M 1{ïDff!#-EZD'۱ Lw(_XTBF3Pҝ endstream endobj 111 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 112 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1160>> stream xڭWMO7G8djؠR6CCD@QػAbXx&Sܵ徺GWbt ;n m֝-n<;]>|r}Y;@E\.%R ,{TqZQ(+*5YV$VV(e`'بB`joJ00S|9QTWA+OL$c4ƨZa:L"`*'S|P̙Dx /K8,&2U4rB*n -R6ܘm '1UlVՐ8#>o8WV+!N7!?|`!^Ǚ)oyǛAT :t\TpZ!Ov>|Cѹkx3JMWToVi(f ?N!ͧyVVy(ZͿφ"ACݘFZd*ooh}0?jq1eXi(Q#QڂB)=_&lcl?$J{$^0w:_HIЦ㬅n>pVOՎx9 wF liⅥK]#d@ .cJl|q[֍:qzGg>rNCS;'4{3} rpW;th?:ķuGX:v嗍s@k~xCh;ޛu>e_-y=H5k4^7%eb^gvKKr]d[p2 8 : LqN)Nxa߼00C:_h ?_TwK:`hvϝ]|ѵZpPy`{G)ѝ]\^^_c{JTz? 
aG.qu~ yTєGswހ >|Ï}ޞ_rScTޙ endstream endobj 115 0 obj <> stream xڥX͎6 )ъP@oέa$tPl/}~(v2E#ӷ&M}ÿ&3ՊtD^EOfNTv:XT4?~b>DZeizMI :i܉k"NW{::gN<2ÖS);DǕ_,R8)iG lB8yL+=LV)v@Q-q0AT(F{R&RvmE h w6m9Zr}Y+?MK!r8aGV _s .abBx_ Vy%8>eEή ~rৃQBL|ntˈ9_Y2/wMc 4æ0(z- AP9[ʙ?&J Ha5LBs;&%=:ӿ0A82_o{>B8ݥP/h].I1ߜ0Yq(==)Q}jMjT+c3ި"^; 9{bLG$"\Rm #:qeQє î $$%!Ѿ/y%v >ȇx@(ڴg) sF+kJf*mՓHg~nNbF)H* .2]E"#g0)crj $J5d{]8`;{`=d[4hPqY[m0X! \Hgq6k N5~E"5$nGiY΢)A \4zyX۱{,foh4 헴%*9^ LV5}KR0O$~tĉJ8:J:_5Sa3 Բ1TOϯ_.X1Їы_qq C&L)>Bn{QЛF{uv[F >5ytƚMɫO IZ◙r hQ [p7 ;f11;/`h^ T5d@NqMBvf_Tj]Dx$0P{iP:&.7 _2Aν>Wsa5fI1b7({ _&4)~uxEdAc(AIybt^@,|~0)C?28^ CO*O/!cc Y=rczy0ѿV`C ]bq i Ezc,tCZ0ǿye endstream endobj 119 0 obj <> stream xڵZ)氊;` @n3oAd%sS\ $S$kaW9r_׷_a) =6 ~;Y/YC<˿NK\V~i~IþN@N׍5]];.'T\~2#e4 й7O,URҎh 0ew'T4v q*|Z0< !pjޜE{6@scVu"<̽px(VF1ǻԍfS"`bk}R'Ugc|jԴ/Fw}exN ^"ԼJYdd'{[ ?g4GZTɎ!$U!䵆夽/F}Hlߗ+"QW'AH:.9}u#Q(O.-*)rf\لBy{>4 e{%gI44kBTh|t<W|E%je#0ݳܲ!K`; zA:1.Ȕb&%,lgixnPkWFt$(#<o8)~YO|taGBᔚ()߉I&Ax'ܯ%T5RJ(F̬X},HPpfg/4̋p ;]TKTz1Qw?uD9KX3krM9;P[ N52n]@-G%av!ɤAQ]+LJC0a()\#}˯j/m]99i` Z4pnҌGPddUϺT q^(׫7 RWA82-B:h⪋aԁ:-ؐXwr?geL1,*jɿ!$E2GqVp@UFxk'l"`) !%ZǷ>~=JJwj+j{^VdvHɋ 0Ş- 9f7te Toݏ<;F,Re`smW!O25fgu;..&b) lNDoջXid91b6~p ttoҕK0%m\ITu2s @~'I oLdt0[9,ioh3gcynL}7D W*)`&%eTdm&V؛8}.- _.@X~ i|>RCI]D6'}QJMBAN1Ϫ P%u݉ حu2]>VVL˛ xI*b͔U`W#50ud).E"f&SHL}5t5HW&貵F㹜L r6j40rD"#rf*݅oľ@N(gt W,E^]ջi%_JW 0f>)IY;K.?33/ejoZȻOMr[T0 M %aR*YtcUNx 4evFZ,/T՝r5(Vy7+r+r/gkt[ilm?ձp*oW2r|Y8jhM$ٽ?-_ O+S`}ݫᇎZ=5UTAO3uF̳uWѧm׈y@&/USYRίemev B(D_7G DhÍ ۟uD?6?'=1]kYK]X=xh`A8? 
B dSw)U41ٿQϟ0b`ZErX$)cQL%Vni-sIT1y< r/1cl,aJ)Ld>̭Hj֔SJ6WUkIjʺh\?3䪶HAXb%x.#ޚ▄)mS BИ[דQ1Tޞ'uߖ&dBOinSt5MTܹ5PbWcUVOrc}3]g1vg`ƃ=6o?'bqJ{_!Pб Y]?/L8 endstream endobj 122 0 obj <> stream xYˮ6 +(.]3v6"Kˏ;E ō0DRD9A $q ,i3k9|*=V2gua0#ae,V >0e;v3ҟ^C`^x1,'s̹VKMsGh34xL2[受_`P@ ̂Z.'vFh8xǾ=S'4G4AHIKt'R= ˌd@<4Fo+Bτ0bz LFizWEr$8ޑER56'lHZVx!\nFʐHz+IW$ IpHZHrp8vc]FHNj61$6ܻ pxH'qT2.YW Mr'夸9Û=2Yz Xl`pGTg2<4PDT҉f Doԁi`b]Ezp$j_+H7#,8DP/Ȯ]N3ڠ\]*L)*EN ]Sр>JԦUCi2`l)AX r%̉=K N (ީi j[kkE-[EMXgC?]Di-3-%rA,znԭT5)>:aZm*ݎ:g<bk{ %4|Hc%+ Tw}(*¯MȦN1Tx-gZ#J5aH/OO:`$I]J$Uآ -{ m!cBE4 'o^m7ܼن괥xN{RW0]p-yxnNzT⋙x1t1㵍{BXi_w'bM)ΨA+g4\8` B9D.$bI7 OR"Lڑf2 dI45( (NA"@޸ )*eH%}Rg :%zTQ#+%I(U*#ZI1Z{k2TyGg ǖ*u `EG5(-ds\T]LaK]o1-*b*1T6v mR?=%c=IRX)tc_ w Uw8kqT)BovS?w!^wѲ:y˱ PN[oܷ_6'J|=^:5Pw҃PL&9Zi> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 131 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1216>> stream xڭVn\7 ߯^I= Z4.,Ac (=4ͣF9#CI茄.S25ɲrS1(}~7OzDϷ)%?Bl^P?63]n 7#½21:Nfš;2 f5j7n,͡(MvTuNb,0; \fRY5.*(5źTJ*tf*!A"G \/ bY`SN'.5wqgkg~ENbpRu7ċ' KϦ'i3_5qf-VɏW+ $ vOxV r|+wċ4|v"cau2(nHqG wjӄDWM& @c]mZiv/f55##Pu> ,qFp+f\/4U^il^G\YՀ ϕhB8==ayPUTP&,lwι;,+] %9\gŅasR %S-Fgp"Gq<-!sq >7mars 8|"ո}%5sux?N. P\u^׹4rW}avF2CaJl0FjKf%e-#l>AF )sPB)1UAĘ=~"` qwN0"ꡤg,޺e+'Bׄ=/0G݆ wTmXq {bmxtr+9yGLAxM>F5S~SBr 2GޓO\wd;쯇w;CFrG$60hף)f>[fݲߧ,w^Boo~Ä ˧ÆwAyjd .IT&=MWOCZz4 o_:rY o֫a-Z՜ ,3bC~rɯN#t/\#{MOo߼=}Agߨ6> stream xڥV͎7 )h =d\(v/yfȏGRE2-L4}(lWg4&լS&)&罎w_~M'ZFgA. 
9ƐuN˗1&E |;N֤i6dzQFU*@z\H u+gf`hNd\~YeF!Q կ"LVdh֤a (:`W-Q4Į➛-Y݇ 3IG)]"9[T-2[깯Lt2A"3J~a(T:zV7*x@@&;c0tO44#`NgQ ::fv¬`LOfV jJڑOͥ*Y`Tv!߇쉆NOe *c,D:a0|lUaCY& vF2(dL(*-\ Plj+0q"ߓ[x>)ђTgANu> k.DU[x#m[Lwʠ,"?Es QA )}ZHI,NN5 {ȇ{j(UZ=n`Z qĸuwBm~..t_@CU){Le{>MfԮ&D- ^msց{&%vڢmk`&;_umMд%rNOݦ%x/wu5wHOY7g|?Mi{ `_m(юYm0+2P|  w$=yuJn0&IULfn7;{AƑ bx"$|úBP$h~ǙҘ#k +J46]n12 ؑ"9?z_߬շ@/i,o?~ endstream endobj 145 0 obj <> stream xڵZ4C70-?3\J@Nx3+ZY,UqZ$+~~,ZH -/?QNgr")$"3n'"xݤ|ϪneyݥrkpҳZ@] e.&0q4)Eh!|7v-WeL~c`i{)&q"'vMSi }Kg&%W,lŷܳ D$^ꭿ)"ېEYKo %⺥|0;QKq=;V @xy&F!EBPI4+ (H 7t6{J)&ڐ Cmia[Pey cz1!ym}uF(ЁCoA|a(|@P1񱀋qXzJ e7K'udeTЊ> ijOW0͞rȩL,ʌ,!VTi6W<U%( KovTVS)Qn~FZ!|?ײliKO9 "g9Zr=|DkZ蓆H5&]g;Sɲ/=0c7jZnAV@fUIj<dHsA3eZ$A5@HGUz%dH'UU ?aL:g kKQAs|1GGA:jX}s+S7;C%z4%s"$=c\P18桱Ύg+22{E?\Wm~`O 5 VF} c BSc2CK8̪((4Cmc]Z 2[ {SF`@BU;sI E׮A!n)ƩM?HA%vgZ_-."1$jOٓKxG{946NBν2k5's23*Fk~gM+ȇUz1qj3EikokG_;~#Oi!BԸT9_hkZEj9Bpw%sAH\sMd֎R|أSB^8 Fc-9k i:Of*hR*@몉0;idqAm6"#t_Ѱu_iiCa,qC;Cn _OJק#WgsO9yZJ 1  csiw=z}5)?ReDSq^$4S/Ďz;Ԝ#5"ED>U/wHȭª?17{ЬGf0q&uFկvf8 endstream endobj 149 0 obj <> stream xY6|e?`l 1vA՞.|M^?CrH3͓D oL?n_#ϻ/oI]wʰi`>!:@">e8#E1瓉Wj|ҴZ7Bkt\07"2 ǒU)IK/V֗^H٪Β(8%#[y;zԵU v1&ə9O=d!в.'CG 6)64IR%R:pVx }݅޳?>ߘ1Fܢ3$03[0lFɝ&@=@`ڋ09R?y\q{#3{ڙYW몏\rt$QoU*NKj"+0cL=gi4~v~{hbJ&IqȔ*d5z3x\r|dCkM9yz pEz)B -S}P_uSVQDߣV7{Re)49U%$oQ sa&{g. |x>٠FL LfpXɠt^gw/wbJIf2$di>S b:8TJ]JR(zAKIP#X ( ZoZ?kG)Gg6X{#8;%)Z*R~`;5d}x= U88BGCPM"VqY7|Hˇ Mcp)ۜz\+;"¹ j0-PsknWSJzTyUReK˥:RK-,V챊Ã軪էUTkzD)vFj=Tg6k:A{4h|̭w5 c:.!ѩŶP/?}S%AtNtsQ38B41>#eQ83HW0mWJC MtCq@<1d?j[8wW\!2ߕq%HW2 wFp)dIcqׇ,1+{A zkĦMuDYlv'}G2U)w 1nwhLpÔQw 4#tI1bS2_{ 6-[uEY*\ө=s=cff7 /Jd@hxWdnH}ɍfgX@p[WP~]|eӂ@eK7:;>*X RF<0&1FX01֩ݻi%k/2#8߁y܈v9,;Y%w8O? 
Hsn endstream endobj 155 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 156 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1603>> stream xڥI]5WxIÖ AZbAX14"~>Tٷ(Bz>}]ٮKt޸w;WKq$ߓKs?ooc-(^~} [tߛ[2G߳-?˱[> #v}.[,dRG-r⋻Q(bO>cr#d b58/KyoŜ!Gt15^tL~>?csssB+Ium@WQ?ݐ|c>7q1<|n6_eV7L)<nwοۃl#L,Wk%b\XQLG 8Dn5=^jۥ_{CiPfdn3.ɏ@Y~GS&6YrrX՗Id%RH9o•HHl^lw(eH(r"!;R6kiC){Fr܍>mɂsImy7);|ect;YJ;9[3#Z 5eteAϵhcyt|sA,@'ͽ`sA>z-Py.@mEɻ<S!+<:QУP*E%{}~s44F5Ow'Lj7Ǐ^7GESzyNNSbeh,؊-qZw1j]q(pO/^-V9+sN +#JA~JFd37̮ Gi"-e^1ov ^V8~*,(6h`-LG+u^^2JO:nXiC & t7h0XX VH%D̸Y5^kc0`|J,1K,1KUj[E-n-Ͱ0q Q\bX" .y ݤxNs mFjG/0@J,x!up\IeІ24-xOxk_"oҤG]Ҹsꆗq/nR5G6ԅc ^"/uQ҂R57TM /0TO /U/vKUxsK<"1p‹dI1x/_]///9H ^U ^U ^U ^h ^)/9 ^] ^T^"Yf"j"7hU endstream endobj 157 0 obj <> stream xڥWɎF +)Vd6oAli_EtAwAĝL`JHvZtLlNELgryoӗa_/&f3Lm Τ9c].?lI`<X3ݺ mʫ%5_?.OĂ&FЁxǢE6zrIytkvĈIn4H~݂ _WSmbj1ގ)*>} & IjwI8k`ݑ!R6=Yk5#Iwc-8qJB(SO_&[rhzFQm$4S00ÔA?Kuq)$xJ%AJ)@c{FU@g 4 HF(B(aPqeN'\`Rg چ b[õ':b3Y[E- (4[*z~// yND5u1|ڪN1 CYq, lD4:,d>-o<e3NEf#'P>u^oӱz5St'+y(Yz>Mqj9,bfyWk=riFۦ#-- َ8#~tm^[$w b )s >oey;$mYv]"/5t[4D:/CmqػW }a\6`}ʵ:㺳Ɠl #wwRT_t/>=v@LMLf/?a endstream endobj 162 0 obj <> stream xڥX6#xpfc2A9?p],Qjͱi Y,_zxw?_?z78kpz}RJ\{5Svn 5PkCZv-K? 
ւXg%ǙG;F7!=-<C<:#KɻRgS+ t%#p Y_ZbY BKel Mžm8}*N[jdŷ6O/Ƅ8x }hP&afѴ?D3iQChj#]|\GY>;(qS'L146\z.vz^!oH;:;)kӜISyuimL˄_t'`sL '^.&UD]S}#_s-/3c@/av L3 W|RLt޽,N$;O]2LHQ9ȃ-HY]kcBH= CɅ.aA`1S%nm-!ґEEH6]GJr?>IɅV)vy7m哣=S1RlWkJp|*ًCĽS~cK R>*yPmF:^v[]Rf$%Hw=vRw,F 0\5q[T|Xdp@ ZzQ=k 0w">b~.¸+0eog ʈ  1/zP?r6EڝRXdhxդCa&}|79ʦyq{dj).[ӬV3<Ī9lF۽cX% c3dPxS +`$1i@lG|bhAu) ַcKG,˞~ӣ_0 J{.%,S>݆X1;BFTvJR c+Z}/Gwk\]kFy^S(A%CWAT0' WMR|v}Ĵ>Hp~B nkЏ*C%w\$xF.^^҆\ν_[sm):?X_qNk' g+Q =az6{۫K )k1J@îq> stream x\iǵ~EĉĐEj%Ӛ!cSMix!G @ֻ/U}{teRel%>TU6W^ʄ*8SX+љ+EWgJ6W- )TKcȕżПG;Wbɭ ;`"IW[:fE|1: XI5 TTtQT>Ƞ"$% uA%8TLD?@ 6 Ǽh*CUC00?aTAOjю@a;2Pպ@̓$)8./`O+c^&F'#"0h =`߃#R 1'R A8d (;ɳ][EOňbj)F`tI@&-iɀt@U6,B(B h"8kdF$d 0Ў,ˁC3.u={ 9Pq2hX rϙ 4@Q6r1 JLDbWZhcK\}P$RQkhOCHIa,wt[AAWxP0փB`|(sE^ې2"i('F_%HK,qMNrE&Li*'jiRHVrS-*.W07DQdz*γB:3<v̵\ᄲ$.rK~=iImyHFBQzJv4bw1HP9NN%C[â+iȔ uehfJF#}KibO+ӧxS&Gy 1A1ʼ=մZr^[h΃rf({{:1O#:.8?#x0~@;HhC:C#}yjXSS?^.Wͻ-[,'e~͈O0l<ƲWVGmYm6߮.kf_}Uh.q*]@KNQ1|>ÓT+Ddܬ5߾xCY8.h@A=tfy>~,fm:$UzE[}X,3"X>=>~kFY׃Fxp*2 !;>/7u/,J3{F10Yosŵv{FȈ*1CN<#_[ H_2|߭W/g_ׯlIWeƈ Xfʌ1(;@LY'`@ͺQ9"P t!8i2-DɟCdi͢k?[߬Ey|}XlyYmM}^5͢mS_,7ﯚeS_/֫en`/.Kf lkh.WWj~WVc}1~OfH9։=Rh-i~lyb /N'ˉ^ϟ>KAdF|];xbHءY<GF$G8Ǚˌ崰֟dX(=Č g#U</Ef#ۻ0|w{M7⋥'5y%Ƒ+8ft 5r]L IR\͜nO/N*ێc Tg:1OНLcjyڎw݉Ѓ>q*1#o]%$im6yI'H͹F1^]owlqKv-fl?owqm ý;d 8/4]VxNa<)x~fOug;5\'WYKT_3ۆq"I*d5OKrV"a`>'|ėaA*[67_|^l7=[]v=607EFc#ݔ1XJ5.{~(w6ki/f87Xz~J=ƈt˖Λҫ~ hpkBs:|g#/X}i6-3X>7X>K]zƢqNw$C0}Lfb$,`9uDf/нj̹Ub~/oE"|( ?E7Da#G}`rnY62 ,r@[}e$?>t@2e;G@[i>Mthk öddt;ُH[ks4h$m`ږ@ C{ǥ?;*j^OʯG) ] B 6Cn@0uT1@6YxZʶM0dʠc(YWk|H:(J@HtVp3*=2?”gKi%^PK=`@|u= `ZǴcUe~]>m~X>RTS;~k\mޱگZ\]G LTnU2+~˿<5n oV?ďʧ6 F凲^YstKYAdVp ICRTpkskg7lhގċ̯FCdFz㇒^U!+n<'qIӎ}z}bb8grGީĻs~r7}G޷N:YpK<E ).P?2cHo/F|.:nɍ4uv2o{?b;>SVƪl]R<}8-BԸXT0H\̈́7vj6_mp}v%$ΙR)8Poj c-VK=n#+[N{S~$dQZ;,P"gtD~19+dR;wyA(f*Ȑ?[T=)S4"S}0"}⤿.ONŔ{ۚ0POq&p||/4 endstream endobj 312 0 obj <> stream x}Qo0}ܲ,C6^NGTC<_]ՎYo.Rh٦RC}5l+e ʪh{oˎ_5OC%wO.׏l!QM&cvg>po斺]-[)ΤǦT˸Csk»'dErZo\vjGAh3z{8{R8(YMqgBesnRI޹:&nSyӋ /B23fDx 2%RDtD.QD4"hF@rU|"ZED1 5JɅCod@{tY >%]/j͋]ef6}W)ܗn _A 
endstream endobj 313 0 obj <> stream x]j@}L)AW!1 x5:B]VCΘdA?INE3M2Ս4tK`866e[(L-0s=gy~ymj7cԔ_0{t]TԒE1z}ae%4j݈# Z=8rN%B, '6@T:w1-1҂!\QmEDHG?SgjS4VVHTb"(rS$I I fT}ޘkd; ?l/5+t۠pg&6n endstream endobj 314 0 obj <> stream x]o0+e1 L .<-eoXNGT?zܴ $r}_I:OM :Vl+SZ<'QlpW(+Do}(OfUPOj5{=sni[+=XKD[. s:b2:cknZw{/D[['dM+$O%B-V,&n{],,U]!D!b1)($E@z-2J!.@fFz]VR7`.u$2l+F+wQ'P u՜h̔Lz`]J.te2Mݸ.z~* endstream endobj 315 0 obj <> stream x}k9WcR\ fRc+ġ_>#C2>ݎ>]nåX~NM~/at8.*e"{ޞKy{seqsӷs;|~dY#߿c&^%>nQrފ/>Sېy=xbZQwNr=>ŝHUqoM=<~lT]2U-WDTdd=P@='RBQ; $NMOՀPKd;P WMe 4ziPȔTD2_ڡNg9hύQ+)zJ㔯k~gT>SaZLQnnm<V69hYnf9f"3F(y0ʁZ(eA3 YIPi@dlGLjC&׎5}wۤV(^וPD+'XQP$+c|<+{V(ˮu f +  O:.CJ 1' f8c&njjXPfR kim 'm[(` SJ(ǂTPr5}xdofy4\ZV*(Z rWOCW((5+݉>i8}pDl>)DDD=bR1)U7R|IܬITBobdRek+r'EN;8)]פܯR*kRtW)k-ڨ̖CisIوdRDtl ]'M9} mi*&|'jfU/a(ӽݼSf|]aaH"zSk|;c+<8h endstream endobj 316 0 obj <> stream x]k0+1v:ۘx@MBԇ;>y_ge^j50YkvVi/Xe'gaE+X('FEX[VCC.Е1,I<)Mվ6gxctVH5Z{/Ms! N$VʒbZwLYFxRAHhGđ"RnHeDQH8q]'( 3r!Ռ4IAL5kLAN7ȼm՜[5>a9:75rp*9 _ endstream endobj 318 0 obj <> stream xڝzwTT 9G8sl^A:6QPItņ[,X45M1{Cy~u׺1f?JLM%R ׯv3%x>_'O5DA"bhm.eeZ`ҏ{_"&z3k>>c>x|L*n w 1}:;$ny`kuWjKuH%7zBu0 q78o )W`k=(x4:v@DW;;Nu_Cgx[{O u_EqB]gv:THX IHJI%%rIIOI/($>+I_ / 5D*J)+E.ҮnRsiw\CSKj!夽 JW.2cdI hlfy(%dT hL"5ZL̄>K'}j`*,+2eM'a>e .eOSuGͶu%+J9u+05O1o( 걲GRQ=+z+K 7k=q'ٖdYZ*7Ye/B2^yT o[4~VbBf24HW[-2ZêwgPhZͧNq4K +İ>3 \RÔo1tɛT J̽ U,+pk@T"B2OQ[XRUVs T=hi,v 2<mGM|[~c meU{"^n;U h+5&qb#/C ct*^@5RȿHի꠶Tz 4?~Dž0e`AjwtA\X{х?/8G jT-W;ƠO#' \(E)wc^<#,Fk"p y˦i6[Iws zS3TANΘL vL",gnEVG,V'1s Jމ]-ymTm󇙚阫3BӴQS.X?F q2ȥψ~a{h Gcvvltz4'26ѝܼTJD< LK'Bt&1Ic6R] Dm4:C&$-MZVx JN\1[M$u̪d :[ Wʿx ,J\T0'vQ_]XRU.ZԺ{Ev 63YUUl)dSо#Y=wGT ԑI0 Mi"x˩Ż6N0ۍ$b#a4$ôAZIැCVHF)JIl]qW̾ fmxI<28҆$RW Tls|$<5N9cj@̏ď76Y 6X\3RϰzqAHEac8r׵dc<iL+P&:nbg?,ަ}LBvgv8PGF@L""YAAo@nv?ƒ4# p%7콹"v D) %0dz\'层QX^ØD'؊ʺ]P/agb)K+ {~EX#~ZwlWNW#XUP*VKS A1W,*:I!t*t.7~|.NB/u3haJ򲔅9YV YR9 VY%`00wtN3 ~~ d,^4U5H!d[qtU{њ="*1&A evma0eE*t\Kxqad;ΘOX{QTiQ'`> l\Õcx9rZK W\&*::.۾ zHtU>EQ93 ]d@f{ YoQWQ]Dk04Ua;D?ڡ bQjVZnSr>1K_nN ,ykZuvyڍ^*)r%I0\bw"~' 
tb~C;|.cIC-wGBVÊx\+_n<ĭRj:Q 0q ? bA=RnwH$AC$L5#t8eǥ 7`M|x]p5AK)M-1.~IBn`8˘)d 1@'Vپv`VN]._L}]."<[֔M9x8x.ߐ4mlASI90;Aa㵒Zk~ݵU ~RI4jfrk3tx{i*ݭ /NVGe2s wt{ ?`a*k+W IbY\ݰABw %!޽;u7uO)[6#]8V/W\X'PŔ0:)yxQpJɣ=7`Ʃߕ/εik%hSB2 `c=g%z t*"~M{ Cj9QeΛ,䘤g !L-7j~;`],ò-H9^5JLU$mHZ@09šJ}hWhʯ8&4=޾SetAUf]b8VKFRusk9 :iF)#m[sWVq.4/1K"Yi ~2j1>Fc&~]=.YMX\"w\x>31&2:6) АZz1( MWo`HF:VM?eMa$h!b)w0o`HA"37K;8s_:cHBM5MPLVɁa4}%1lA}ik׫׬C+y|'MkpnCܡ`RS)j,!v1U^dא[NGkB'`96hc6-&~,7Z۹)cE$mlCwc'tӮ6p'vFZ^YN<ŮAJr4' sW!j-‰9& Wbok5/@G8<Sagv#2F}Ưaz^O2<~;l㉇}6([]_֩Ga.g c -IaT I9anӁMMfz&p슻 vo>z?(&Ɨw^rUi& BT6B mW g+|< >h,a.IAq n1xt/)ba{99EY0R%:FJ^Zc׹`+ 3)5JBM;]OvR"< ́9+t RJ7{3 ru |F\CmabhؔDlO–R~q3;tIYx,R # 颸t?;au20t.\XC#:1r:1fQcuvoKSIfň8?Ĝ$]27+bёE%8S5, [$[!ّiI#kiHjQeG(iaW_nP{ "DzBOF+#*0nj$ Wo7ͧ[xlDa2'w].77em8%#軞'BͫaJݓ_AWjW m=K[b =P`O',\={|"9F4rmis5 ڝ x$9LCs@Jo<=QZ]LHaT/#mVyM|`j+0̥nƎG;.[gɭπeՄnjLA\E"fɲNϭ̒ :0/lX'?'+6?HS8I =i]04k*03h[ӸZ>y!ʾ4kpȇF>M8eo &&V@OȁaDvA%f2)ȩN88.릦;)ySn҄]+}X8D96ǯ4])ڮT!z^_~v{n ;NSgl˟TQ#ZIraќOK-^]19&fEapYv)x1ZՅnd"]5g{u'JĪ]B]VÛϬ~z d'P-k=zkr]b9kOTKkׄW/t"uUl\RqFBB}^s^yDkۆA.]9) 8-A5(ް54WԐ'S6Ătw9=~HA{|Yܪh,* ջy5Rv~3n Rm2;#'d;˞) }%6 xZ#?Ub CX<8) Ɔ(D2~@bCJ؂G)|$;F*B?": QG%qZCx2tA1m$t/sRX&@aJZr7g`*)1Nm U #`ߵ7KM%kkŇ5Ҳ`׏2H* gB75 NJpq`-l?&5"kW3>(h/l6xmclD HJ 8ײq5,$&l-O0E`qwSfU#8nݑ3nX<|&hƛj9 qn=n}3b0ĻwLZ4C(aNԕ_/5z/[]cdR.6Ɛ2v eE1` `2\9h\3jFP&ģ>5#<>[*-O![dp>sL]':H`XY%vab3Ul u=;LxYAC྇n=8NncEkmm)+m+$#%ξSC:ց W)^2cCS{qk  /k]iXHmVZ tH4΁i08TГ 3β9 'k ZSB!/%O^y[^֎_Aj(.X\5g\iX9N&^T # endstream endobj 320 0 obj <> stream xڛ܀ qq9, NP6 J I endstream endobj 322 0 obj <> stream x]X XNQ3.UQ/bQaD@dU"} "cUk]իZ^m'dL{m"b+IJTm@j&Y/X ̈́ |䌥.32o[q"8{tXr=eYN4]y`9I4L$DوDCV,/e`k*F>9MlVjb 6A16C_)Ox6>(T5(>*FfVٸ$Е۴*ӅѪm|DHP߿jb"BlVĄGĨT vtIrMpfϜ3{CG&YM'بz[}f+ōLaͿ3M2ӗDĈ" \4Fd-MlL,T#bKX&)-YB]{fDN(hC{Q>*PNdgR _Ř*Y+a><ń1'yM2\>|#68klQeZ&Z2X](';'jȐF 3磵Md9;: η^ hqחO/e ߓŊky)n0Q %pTQ@ waռiWw\m7W1:3qoϻN<'0bCێjRa9A-6*R{`A^'|x `${b$M4{ ֝|`rLiÇLITeT8^OpEVh`$jJIT4P`O٫GB$ 'p=N kݼc׭ N >1{秫TI̬$MTE8bIx>TΝ 钃5p0 w! `b!fͮexp2rl4%]wZ#%/;Ny9Xp*q! 
7XF<:bq 4/} =9Cf\^dǩgث4[@ɾb;]} 슆#8saD䔔B׎ZK7QԂ|f_@=ʯ:_ɽ:e6f5xE4GFEC .zCt0,A%9[+q 2vJ<fF=̔]d"A{0U+WP5Qje%D66.Sќ a !k~ZwXPAuDQ |$e'c" &IҿXuC%ozfvÜtՆx/,K}p'U1;wS. qD'a>iHk-P\wJoAll Y%cΧBVhNT{ꯟ|l z3'o wؿofؗa($@??x/jq0 Dە~j yUyGkwe*188Ԥb4qq XHpPŻp¿ n:~DF~õWu^\u|G2L00G]X랧1"Qp; ӍCd[hn+2F%zإ`K8cl4  MRG1:J+S*ݟ\Tu0#I L-1@_pXCU mVɜgˏ#n, x.PGmuxy.KRtXrdYJ>aKܧG<ܙgd+~VemD a8 ɨg4hVo- \]]S#. nV`=Ü6#Gizg>V0rzUO|oE# Z䰴&XN1{ϙ(+/.Ʀ6w}ޠl զ\6z陪`b?v7,>ձ f*[וX"O@&\I^ Ph}NW-1 1.of}ȻJ &8`! FJ ڊqvom5\-7ClDIZ`v$c&ta '}Y9[yF3x(&@|fMpF2wӳ31AĒ}dJJwQUSzHr?-w/,s_Z*ԡչyewC$#hr[R#WԈ0=5vvr'1Yg'Qp(6U<Y>a||RP ^ Hٞm/o0dJ]E [Fy^T`=Ɲh3ZcnZ ԓ2"2]^~~|)<{kMz7Υ͟¶R؉?ΓJkQ6]^M͵[L< IUZ X;2q@&E@ W0XN^DQޝm}MJn.S2к_GUm&pM4X/?d*m瘭H\SPñ 6l^Z>b(I&U}T"䙡-b`2̍8&qjvsDs}zٚ&El[ht7~ƽ``ܩ*;l茙Q˳:0whĈ*On…}1{P̿*9L}c/xK&}x/~lo^) 0NIe~b 6#ՂMQԟN4=MGxY[oFq c 9]ngx)Ez5/bxFdN_Txbd(U\nKt?hQ!bgݿ4CI|Qg%Q0ʷ gXCL|Myi!kh1چvQ."Zw&Sxi l` 9K奇x#)զ"ͥA1#s96=M +a* ca,{G(ZhXP0em\;YAnwm A>L4Ȅ~f(U46Ot~F HX2QE\84%Ҳ#t+#12 ve(2#詏HhT|DX`Y#ZЄa0,=\vHM㳥*s{؞0Y|RPó#)P2ӎs  ijHNI/5`6&r0$Ś=L )E}RXkdE]*%WJО##Y!B.]R] ݰ,=,ϧ4~t\>li@:~fY:4~tKL`(2~7C$ 5wsn>zt㌨|xYYBWXu,{޼g7jj2#/Qr! bg|QsEf &sWcCB?Ԋdb$[!L:K/4+Z [j w$Q23z%oa,w$rẌ́Iju%ub"µʟd5=lJDcxQ;AM?i VK58Tlx*{v`F >g_h1i^ߜœj1^F7TIixCe?.*%N-NG>I|KpQa5pO~#@8C!q f#R٢G1ft"trV.=`HF*'m\λG_S4֤'ܸ^]t\hh>t P+۶BCp^}L69]]čp$:~1Ŋ`ugSddl/9ƟŭGH{'̕Cp#%E )|%o~5Ο+~I7d|'oD[ =ak<~Z1I9[#aٹ)br(?+ݽoEmk=İ0^fshYN'L+ޜ> stream xڛ&ŀ0=@ `\0ye5 endstream endobj 326 0 obj <> stream xڵXy|SնNHI)%9jDe(X2D""t6ifhZ:BҖM!'d$$ʌUr]w}Z{ߗ?KV߷kG(ppB+')ׇhSV#fynY)+ܝa8ygQoV4F F]sG><u p ]S𗿀,VUGgL>Ã'LV~%B !1 Q*we*2Q^٦N=t26bذؐddהq ajӧ1}pIPF{Z P'Mݦ!qA]?'O9!fH b`Y F MwaBAP,t: G %B(P*2YxCxH w~E0i 3E )RBD<ZDG o T:aDk #gK$*'.rkU+fK? >~?xj/#q7x[-qPP ==bkg/M\+nUOZXq\&MX{Lo|EM/du-ĸHprlnvXPm+,*rMF?:c"3AU`Gs)|RSZz&<2{QՆ"I =Pcr?-VCLQA~g6_L'|[!/m>(pElt쬁ihNgZjD^,!%עfC':Iݼ? O®I.Bv (}IobXApWzyW! 
vÛ NfqEKUKlON!ɜe;,E %،GQ0hU(j';\G<:QTѳ-A0(KNc0(CRtd#AEH\//ҳCUe II{<B_D_&ηUQEye{@@ޓ:K϶QHp4YLmW䝬/7l4@Bs@|Ub\fdg xGf;.L&̍(k{ :7]c )}j;@&R{ڹڅ\IL+<~9k{u7??NX''SRT7wfr b[N/S6lJv:uM*Zf fx) Uj)T3?v6W5FJNhTO0xL-Qv2βLmY]xu<-Dv.LKw߱+nnvW`/ SI,FuocЌ,Ź M`lVcګҴJnIgl\S-~`nG ]ֲr.c~f`>%>o}$th ]cf%jgQ[Kz6ENZA?rn)Ԩ,zŹL1ݰMU[LUdn_o?XZ#-P囷Ƽ<-׵t#xSW8RrvԛX!B{/ tWwύN]j 0"%J,>zW^ 4tct<;HNՃE H4`њutOtDEPNPz#+'Y/VHpO9/A(=r'Go9v%L+#"SfAYy~/KT'6WX*)Yᇷ5ى:yxmr/iM[YOڧnu=l퐶p[':ҏg%Uԗrć0mFv xð66jy.(*'5Vd% FO{ȅ QT6R9zbdGၻ"xӌB-o RٸN/|@ӬwJ7AtOwn{R`^֫"Ç5y5vl7[ȢlWPnfygJ&,M&Yv |?70 :=gJ0*xj) 7f{HF7a&IAUb80d 3|XWy s^:9BG3AP Xrb#HԩE b;d節A^x +y5{sZkI]2&p7XǼc7lasI-d/P"kC;3z*lD4JK1[g10?,Qcχ5&)+:V5s3y~-q&&"D* 管~Bڍ v#صF󩢻Bj>Bl'0 Ⱦo߶!я]}o +5+}'@/ ۞0?+memzThxh6܀TmrV|vb1j;.x:5ps'΄u{zЯs'6-cZ q#X)j#Ά7V &kwXgQ-oH<25[dfyc⏍.'{?ip!1Tُ}OA3A22[q %ڑG*$i/'Iz.yL!.Dd/˜ },g| [?;OAv>hɬ৐9TKq 73uv3{h 3|sKr˙ [FnQ(ǘ/LKNI2,9It07A`JcQY-6EW5 XvTQ=[ʊrݦ|M,8J-77!8\ë'@p B</d_ОXSO2@S@渎!{T^[´ X+v+DF[]HJ>A?W0lq0GjN!9Q|()T V`1^2?$DD?0}њh<Ŋsr[wcALΠ e2)̀2p& @H ]I'r=r6S8ѷzZ4M!qUn>~B8-{VEPdP>:wDf",. ew MA*emN/̵(,;|$8y@$H 'KȺ<І *(YW ^GpYZ endstream endobj 328 0 obj <> stream xڛ={=XoR` endstream endobj 330 0 obj <> stream xڍw\I̺쪰ʎ30'bg3YrX`̢H9$YTL(*xN^Ϸ'DztwuUuuuU}-L,<&md! a:,j7 &:Vl zR t -%ii ,} Mg~3J3ZK,Je\//֮6m\<=}'O3Qpao+gh!1[X8[= ]m Ml Wx^6N6Vr{+ ީ7]]< w{LS&/uu44:u$zrWeg\~ుl8O lxZo,9z&zVm۬gUon=K=+=^^^^^%R;g? :ZQZQ}S`E𶏸ώ>>}Q M7y\'̘4`IALv2dJSM[1fcc]Yƞƙ5W>{3"3">ViRlYˢ 2i֫Z/KjEk^A"u;!̼cd_f43Zylrq+M-㨯nuvY4 m]D}$t!۳:QQTZSxg/G%M^ɡNZ7k`]8EWzZzgʰ5㿁.= Z'A[{v(UpC_zvЦ$J^K'%*ڥf|m,F3H=;na3+3gG di+ sşFX?9s4yBgf>a 0{ C2de3p'_a:hf"\|nidFH'q[ =6LmƱ;!wĴhG9??\Z}&˖Ɛ Q 2^8dEMӟqTakvc*6NԚS_a+{v. DrC6[JK0ߠ<@Đ:LACCHʓw.~&xD"rJK'b# od#,cv8iǼ9-_";HB2; ^A+z.hxMA:$Cc" `=@^VlHd!^G0[=FU3,_g/<[> /;U(<4s2Td&W3A",5AAI9> = F2qHh>bА[& 8U[ƅ{Y0FF~. _}Qmfns4Ǫ-n^-1/CRWB:ʺ Y^oW$| ɣQ wG(v - 0?͉귫D9Cp'auzM*" NPE둋 "8pTK gT3_ڴ~&PBrJLJGy?":-ڿ F/`-'X&ȉJO `n Uwx_Oe+f׼GB|d.nݬ9}H.U؁cz1@_aZh́W<% pxa2bdFS3&Gdh-B5q7FLR*-a7f?P׃3Jbr dm2  ` g ɧh|E`A2dRS255Bn< G!?NMG`= XT2©j}l!ÀY|1h_!uR`n.Um? 
LGtw?+G=R}CUjғK \ƃ%@;_CjKay0w+QoO@0 ­^ ?um&ۼš'@%ނ33ѳgTymp“[0Lq<E[u:'!V$f%3.塰Giu!3hNhg'lu$7= R=(7:a7i+XFz$ys"[ynȮ9y\-hʃԃфGB$KAbyE-S\'L쟂&28Ϳ&rD| ˯MG]5pkRU'|K[0W@;EΠZlgqDwJEl/u$h_DIJڜ{rL*L = vzpP'$Ɲ4_lz5 Ȟ9wNwb+MVy2}VmW,W}l}bK3Z(Gpq"NjNr~/y =OXt',`C"8I덝W:ҋԇ1K/_.F}`8hPqdρygB+xobRiCWq\u#ij%=Jxw$V M?cRd)T#M(鄒=![ [Zz?qk[Vm(APB}4k?Q qtq'*q?LF{UCt Z;{;:Fo[龘YhVuFfȌ0?#qWv h;G W.-0Ƣ0ôxN»N 8jq x ` )tkSSeEqI+.QD[Xre>K^Ws> ,h! G(1Tf7QPwWQDu-L(JKo ]eG9mq/pK :0q6cEsOcȗ(@HۈFSsk \,B `Շٯ.A_L9 /e6]DkY*/D汔ZO (o2/\A9\EeF3ⷢ&3ܜq;qx^w6 4|Y3rPt?%ۈ^{BP9U=Sf `*ugZJ7ΎWW>FHD`$h0eJ6Ϝt+,aG io a߾X\[ 1'N<C)ɏuJ `&zVI e;``Xte5ز;3Յ~9ntKYNB[†T A ܌Hh=\-ǙU~fe0ȉvClsy)$8ӘQw7p#|IOTh'J2Tj U3Z`XiW0=tr7afj0Ȝ.$e>3=#::19u3?5!$liT}7_D(ү&/2C_l)-ld`31%u f#Z9qL;deP g9x߆{`]m)q 0g?(9tڄ}6-cJ4vrRZ< ̱u.R[O;?#749Rt6&K5Ǐ+2 Gr ɳJGRr' ("CI3ѝЏG,Z}w$&aL4oˊM)ل:bG+8c"bf>z2X_5Z ,?lQӫx#{V`_*(b>]|Q1e~D cHĠ+RwZe*-aOf_<P,X >fGq%=3IYnj B;Pp kXs= ?R3x-^'"`)u'?#jgsb'.dh,FG([sd 5l(G$d|ZGFƕ;3*329 h2k{x"6Wb\GеozBғ/a3*m$U> m .x qWZxr 9/s* @;GpBjzl^g ID*fXp`(eqA Js/7/y«]~x>!?ta"uo3weyϤ[r.u05ͨoR+nC+GÞw~' m]W0 ) ~ޮ2z[]^wլx-t{Qs"'²V$Zb:q2tv%@S w)MQM!*ԃc ƹf~?}ؒxBZlxqPv,kw;2H"^le <1":ɧN/i/]?rnw2hkE^=:Տ@KCkS;AK' hu-k\asT#gr7!2콵ˌ, `A|rEBaoSQ]Jw*JZ{S͐>S xF7JtUB+A5@YLW$NF8yy3bXeŝQC HɉPE@* CSp$Uu}J9cˁ%(W5 {lB9 m#C}zQ3~vmFk6mk6v ]h$3;؄Yu`DBkn<ݧwQ ^ S`ڵ^B=8i%] y飉+Yr{& +Q̺s:KWT6??׮0Wͭ+e};swI}܊`M#Ϝ7OW׆ȍ/")ӢA9 b\io&-\BquEx'J?|@~((/t0Ѳc -\3qZ陗ƐsM|pb$t Cȕ!q$8WnmfS)])[$6eG'ןTQQPYɤ <풾)ev! y5N5Ve*k;XE:a}_hbBz\q{sHg0}'/fFEB'adPkY&xogQDzt{-2ӭhv!ڥ#->jG凜{4^h9]u|9Јפg::J8E[8)k޸rDqfZZ~j~pS_Paj.7N ~x k.T頇!^X*Eso<{}%]!KuƆ trm׎-;}_TWٔZY{9Sk:\L :64i֎~{Gy_7|fm=S{ʱS'X|Q.gUU&6WwpLVNJ~<2o? 
4 i;^su*AwaL^c^eU[kѡC$.Ŭ_ctiqm]Ac)} bǡh-U,^a[Se oogf[ć\LDL,{IY C9Gxl|Q2gos*YW'huT  aAFEur[{.U—OGN.a:Ԇq rd?((XOnb_A|f|^{sS#J=?UFՠ\u&x*juÑYsS)RI&H)̫rIO ɐK9Ia+"͓Z/{:ɨNJ[6ڱݍ'ʏeN%޲1lI g1M2pԍb~IL0 K=ܭwI ʎ:5G*UN n.ђQPi oY8 o2M0wS meo8Θ]~a'A"$>6OPJ5/a~~Oev:u4.7{ 7 á NI Rq,\*uȵGX5!sYd{)ő[0lwɫխ YL_8]dGU7t=O AN wO(X^ړ4x yU #6Mt;O% 0"0{CzSm]x MIO©T3o'n/lϮ{$hݒ _?Hpl LjE3J9$}dֳO%0c'Yy󅉅LC醑67qQҁ?( Q 6hI e3[ 6Aqؑ0f Sp_d}ae!)06Tb+8*ق,,u;qbDžr3Vyc£.iG+9Id`#k䟐/!_Ohp 0; fB0`9kB;Μ/^lXjKyK)߄2j׌Vo7ljmesU"3O ~49B閠OUڎ\䵙Aa"^W] y YQQ6Ov`5Njn3-CĶ HishE'c砢nI嘆OLlEы Jú /H헧\%86ok{ .q rrs5ay\|Ա۲e 3Պk AWP OBn$,UBhAƟX4F80܋#% _䅀~(Wj"7t"<%![Aѫ?[b>%}CT La8xTHkh:އwo63f!oKF-؜*MF ptUVh1$H%THf4PXdLنq oFlK_&`uoN nBޯQ3f,qƩ`f;5Na=qI&^@wcd-Dh&~||\nux {ڜDj_'4p:{ɛO2r0u@">Vp?Tl bM-ҔD=uZBͥLJ/դҢ Bl 9Rdp砍ѫ!* "Nd[pj3bgyC_K #Y"Qs׭@$ݙYC23IFi5KցOi꒫C118b{%9d#p4p5җ]e3D6YoqD /%6db$cSU__Po v0׮TezM&8Ro *[n/Fh(2Bb\Chu޼܀)jz7<=܀B9Eo C[_˸x? hqwâQ*¡&x)kҵnnVVE^5E:>U 6=8dDe+}4 C#swթn[·}<$2:2*2R5 pš1V€?ꫪoD)#!QP~)H_/XO5HWqf_ }ƕko2E n!r߆,#!2tO* XBn 8q%#1+mD4fDf`X& ۄLEӖn xpơ')#ؙ1įdk s_a-(-2r$vB"Y)>}נK+|vL@.JF̸ۀ &?qo5FGDQ Z)PQ^ aixK>+A"flT= mHH,6y!!r|x#sju&vz;0>a) \;\&$%sH~r C?RgxLZBp@`X V~:B:,tKUl2Iy &Uyv}fVKDXm MJ̌+`ŽP? us̸ "㣲e[f^`J7y5i7P҇Ȁj6EPOM"lD)lgT[ԲC&H@l0a"Kxݓ3]OtT:Wm"Gd[^`7i)Z;x1埝9 22J*><()/** 4_mn͑$!`ཛ0NL;|{h/(J,#8/jǸY5ڟx~ p>s-յSN86;z+=XKN2@77+額8|^y\7KmpƋº}H8 C{^_~K\aۆe _].b}j_ !˛ϡ&+٬s*Pwn/{c_۵k|Y< F)dWR5.?{Bskts=go!7>MEVFWb,lWgmey>/޾,v7NWM4+W0z^6MΆ=̄R'cqJ/>e\6.AZKd4ULy&Ϸ\qj9;yKvYmp`*’cLCr#Vr,6#{#$j'!fO-0tRK{9!\\<.U 쟊`Bmo|VA[3> stream xk`.Ƭ_xv004O080p4C.bI8BJ˛?0rP|N kN5dZրr@a)v}AI^ endstream endobj 334 0 obj <> stream x]X TfqF83.QLPQ܉OP : A|h4MEpAAn(*1E-ϫ&Μ:5U߽ujxcH5&ĭIL'g{2DOd aw !ޗ6z}NV}h? xVqYՆ뜜&4gXmX\2&K6ů;Dֳ'a$G}`|׸ސc\Vh}`LBL1;k||5%⬸ޜ5ׇSs}~ giq =n0gi9ss#Q7M䜹I MfpnlΝp7ys>/7[-8. 
䂸E\0…r0n xYÂ=9yb13 R b"TqRԂPSR~6mb;] /+/iXP~Rs1Żv2^X wI~x[IGwJe9 H:):Ӥ{k/wי_rf%Dâ.6ے/c~;mD)eÇ8rxTmƍ:uGjRmq#G-8M=!e߃y;\I\X|:'XWd֪̋*KBӸ֞Qd 0cM3ƈP ZWɃfs,i.Wv{Uē]^>G4ٵr+Z5-<8=1!jYbmSީ3:/,<2^][ژy?NO#)8DbA=TlTSnt17ȣ贇89*gM-.0~FXyڤ&y$P7b(Hnx}p14yx๦|AJZ1ʉf7Njс*R xjJ]:>&5/i_1G +Vxǎ9wJK>t\|U O/^;."s/Ɗje&oW`IsBǮqog铥Ө X<-FxxAoXm42:_{?AYv:96pIܼY̡ .Fr&l)pLPBkȹwV8Rt>aHE w]4Sr'ҙZ܋_*я^ h$V j8?A;Sy9~)IM 1e;IGA5:ujk_gi 8$ -/pv[ 021xP8W@g1#SQy].$=ߛ|+eKH1c63DA9,$d˿k'8 u/~DV T>\P_$C`ÙaRݮ>CU~yu[#R׿(s5~9ݺs;j0 Uddw~[Kh! }jPy ^0-SnmN /e~\RM}UԤ|=+UKQ$.5t*1bY'G4ԕڙ 86G V6K/ov":O ,a&LX=vX&%DmYwTjʪ(IP~u;s8 Q/1>!8CPUۤ=oO˾ Ɵx4=oypW<ѼM#}%XQpq=x, j|j⢳a?hT]NYIU:Q+c-R_ѹGnj~LsTm8Cw# m{ n,ͩ;J3E&JZO/>#jG/7?CϰpƒSu t1F/䜭<$٦ت~@udHG+[܊mʔV% YɅEXf\ۼpyu":0 ?ըbD!$3l%^׼hQk-Oyb6@fF9;4-|yٓ77SfgQ[,a@xLfJ^b.P:V¹}9574C]ɒjRm/VԢyHH=gp>☁AR(:̤)!25iS=E=1Y*CNO{WtBVν?V0Ňʱӕ":3aդlKgxS*s9K.qJt%2oqc pN3k˜G ĬmHyφNfxɂ%Ū I)ۇkv%\E ৪$_8,_ ʢ В][J= G ]~.™60AY$J 05&¸ˑOBKd˧rZ iuѐ ēU:ŏTRoqwZ'=up5(|MS* 5oNnIY{#,Wf2@Ubx@4|cIGSmo՚"ROd˒`H^-loK ]bll6N'jv$FxeB]N3kd}LikOCCFѝt=)1z6"I Rr:E?$Q̜B[+Pir|)i#%MXg ɦv^*&ͬ1_Z_P@4h_+$Bcjg?2zMs%SoK^isEJjPm$YY)҄^Z+ŧֽv[[uzzkm} *h endstream endobj 335 0 obj <> stream xڛifV. 
endstream endobj 274 0 obj <> stream xڽZms6~ߚL&n奭ݦ-8q$$ɿgP1 CdR"S*g+cTn&n*TeD∪E [ 4M@a 5FD`\+PkIW> "6+ UP0.X+*8 BpB**ȅT~G;R~v{o1Bt ֛S(s C>u)wX5S^wNݑ> GFrfJdz3yu5]ݶ޴o(()@ᛓ6N}{6:w^MSlxVi>i/47Y=L˵cw\y'c^ if?,?SN@&;]Xl}~^o|J!)Jzᓱ&i "@ڏER`>0Mϝ,6,1ghO=̊9· ) Tbiu5؛l-vnŢ3?`YmJAҟ)xq]"\{~Nӷ6zUb= ̵濴a^XL:>/pch@Z'&j77oo.||KS/[,YՇf޴k[+g+KvYm3~6g3~(~@YTʬVZYƞsX[iNuOMe 0ʤ f`(orohT+DTsf gqSnOM޼{_(<|M@}1^.ONakϖDWJ}0[򮝝<.(WM|Ҏ~^/W z1~'eq^GM.8foRZ{$T9fÆ]9Af>MV%#C1d.Itw,Gf;G F_;\[b9Lg lJ++7q-,ǭD1`,',0'Ӽ"/Ppˊd)d!#4DHXfL40dWT*b1$b,vрH/wSN&6VrKn/ʅiE ]k R *Wh:0ŏm%4zD) Xy/*JFF*bQdUh uj Z]iK4@aou%*O/Y>M__ٳ/z}:/d}tf궛r|2>\Ltvt!tݶzRggIE^LPd)xx*4]m/EwF~hJCat ~AAp};yyj:A}D=D~meenx~;Ʌw4i v5EKfͧ0Ƙm#h :7ŋ[e@kx5o5H'MYR}Lm mx3a5n5 *83c9֙|zקI4i ƫ4Ehs#sTAd;};-|gʯHga ǒngͿ^>Ol)3eg{Dq"}DQ flTo'~'1"yO@n ɀ>f+f, endstream endobj 336 0 obj <]/Root 1 0 R/Info 2 0 R/Size 337/W[1 3 2]/Filter/FlateDecode/Length 794>> stream x5YPOa9EB EJ !ZliW$;Y"{K[}߳˾se3. \&7ng~ysNR55?)`&fa_-;*[)6;z'A/ Q̩Kٛ<dzQiB/4P 6D?QA]~KcE%IӚ΁D`!CEe0zjK [aV[Jnb")}w,q7׶0QKJ{*z(QށvQ:+b:8&D㱋XjWVۓaw!VUI9{b&kw,ӟb64&OC#֗A%3~b}tvc_3 9`A8P&ֿ83g8s!Wރ< sqخFW֬1C~cc1O}funExO)q .Xekpdw܃g p" Lw~$<<^Kx'c%^e>],{xC||LPiSqB,J\q-7&܌[pwN܅pVn<0xq܇Wxo-U8|f9e@:O~}G}'csE?lM136;cklQIOXn b|I?S8fg-v/.1皜.vV]^b<Е_!H7g7?JEK endstream endobj startxref 253187 %%EOF actuar/inst/doc/simulation.pdf0000644000176200001440000020531015151412457016140 0ustar liggesusers%PDF-1.5 % 12 0 obj <> stream xXˎ6 +VEQ (.&ɤ.R")˶2\\1|>DigLѿӯ_Ooӏ8~zO 8Lo?NB`]W ~~gtγp_*oD }B k2 /|n6WlqAZY&'y & >45Nr,LgsVboja1.'>)L+dE[ ;;<;jYg񝁪)n1|6 %fv[φٙH¾-cط|ԐBL<4E3\PWUEbV%[Mts_ %yF5W+{&I9W[taT ^s"\sv T>O߸Q]{> hR.nc0 E{ZMt]j=KY杫V.b5Tm]xk /(cPV ȥcԋBֺ:ս.E})*CfSS7MVՁگN(Mo$\ɛfýY/xhu)RTxh찆[XuNJEE!T5njQ*6CW$_Rizi(bUkrYF9hY*dG!xvԗu-".Ц5`\| ؞ t88~RE87Dң]5_C "ܻ?iDkB[8s7Y-]]k@ S1m/yM$]a{oI܎ZjMReoZ*k3V6nDl Ȅ8ʇ' Ճt_䓷uӪwBY5gyQ9;k|!U71eɺ%VvI73wfu1hY}EdHO[ھG7*2EU$g5Sguy50Ƥ]vl' BmUHq|$ők{5gp=5R8r>UkOMl0s4Xʑ\dPJg}b,OڻHzu0^Csl thMA<6/vwmk q5;,X@ASekL/Nn>5KoGSûS4:2bc;r FaIpt$ J|险=Ds]ـ/gKc%4GEy,')2#, Ȑ@$;M;/!еr4tUv,u/L--((Fjs.;z 
endstream endobj 18 0 obj <> stream xˎ힯"RoЇmҷ z%sCJ,$,zu.5tۉe'A2_TLߎV}ifU@(VC73mOK%:\:wYU*>Xg*KW^e/K,i^i'bhueȼFdZLݢ6mH7eE-0v-$4UIG:ip3, s@E<-B0V;F cLx :Z=]+ԀMlСG[CFvu*cm{@^r3Vs>9_iY>aOQ|)9 [PE d!= s9u۵ J|J|$HCSI#%`x ),VbWgT j.bc6/2B\ YyTsCYaBE|8ډ.BV1ǟb03bH^5E@@4;䤷:>暃IӅinpe *{TZy+8ka~ac(c%PSzmvMm#.Ə\jѼ8ź"%%ۉ9՟NJhg%Li{1UMT#zi&kUnؒ"׬:+BVl:>Ư*Eea4wO F'&\i*D2CSոkieC.U^kz[MF!`;K1q"pңUT/g6f]iMD+W0|v](RYsyWMŌnJ4f[GqnkMơͰ%`@s\jf ziZܫjkȦ+s>Ia]z^Lk]P3\gbmJ/DM#lOј-4+.o-Ff/S@5jCe]RZDwS`3K1Y匫cއunP=H*ZhVƥɤ;!U);@8mdu h~1Nchlj]iz"1tXaj~N`,ǚZ?K8X_onƒᖯc4څh({^mGzג_r ϚCWm yOJr)DKz}ס0 ]oV8=mDNCyW]e8fz!g՜ѿxu|s;Q kH)F}S,&rty2Dž.צ: pgƴ|H kkqlK_ِr$K_e.KZ:?ppz )= )bMŠ'(! ǃL9ywbP&xC$Qv=*lP6QQF,N,!⾤q5[ ? .;%6*Q74o[CzϿyز!"zT^]y{5qF9*l3βg=֙u!]Oڲ6(=*%tdȁQE:bAb _B';}q46${Hd4 GʏTtb$K)Q&Kc]М 㹏{Wƶ=&Չj:-'f[B?FN{όZ/-T|ԊHK3UZVk,\`v2k")!=@yI <6 \PN; ?s%u>~mq$玧GJ7 )y\a 庹dOӵ9JWfEФvXOނ [70$ @p8Fx. 82Cy[x{5_A}zǡw[tϧ ROL:B#%*:2RsVKGg<}`mOwa+9XB [6ͦVmM|lXɒE`%tU#g.e wvM;沭8Xkk;X0WI>h^v=8OUu|p_t Aak^iM>hn忟v|ugUZ53-+ EX2tZ{o3ey` Ai^KE<ui7!HYS}H).ݜToM ʡݞj\eC(_I AC4n1nQ<4 endstream endobj 22 0 obj <> stream xZɎ#+D3 :m>Txdpd*aT$c}?XP?~_ޖ?"}Z`Z>qNMpyϗc?]04 N;8%.'%TC@^ՌۥQ~EZI6s=1"d>ՑKp)wV{;M+(}T氼wL'4U͙GP?LoPNZ)haM`I+|F.omeRw4_nnIWe,ۻe ,#tVT@PU/Hg*[xmEszdM X) П'*zrNWF|0r_#Z|"\ÌtU?ޡs ,l,cJ[ׇDM(Wz0ud;n1{Z[Ҟ̥&,}fpsŇgʴt+y"JXiCq&2[Ƞ-* g$װk4SlAV^:,F|uYϦ$'cPR!h> &^0t6ƋL /;E a^Tr\(rb kp|z Po\ES AGuBg<[-t3&uvYd2nDJ."+(!ChS6-'jSF]-I_}RVQ/'͹0'G8\}4-lU&ml},Jkr!`Rrv@0vs< wJ J%[3<ޕ340)Yjܗ WB:(lH66:gæ1pH8a}ް 5kq:Q-OދK?2Ӝ:b*y˜ko J1I%vD~ƱcGwP{+2^Mx/6I|)9N/֪'UWk kFx3b~9~~l;@qEU.^GqL808uˠ\[<3ϫ1kk":=x;KS_p?mº,C endstream endobj 27 0 obj <> stream xڭZK6h7Ç6rKַsI%?_%R5ִDǪbQ˷xr/~Q].p!.wvc+azo C+ampN/d,_]hXA\NJq|Y__n,V RYS뻔nhdn%\U֕yֱӱZ?jɬ5F˳>Q)Z/k+/d du,O/xm^3YfeI:=vF9kءJ׌:ܹ̄VM+w7VuH6ӷb V;*ӝXؽmϸd7q:p,S,OtBIxrF/|}-BH8jJk8Oyc܁>y07F!tpAMdGl@h1y>-…^MzIl+.^IWs O=y$+71a$Ҹ^>,?-.G_iVRXK>!8Az+gP;d=?I`K9ƪ^{%gbC&\%얈VUK,A[S_;4Yؚ!&'2er[ \2 B hrPR*+)K+î=[Vg ޙ? 
_]v@}^r}6o֨ZzFFm}¬E_dҡ,~ a |Ss- yPƾ `!mC:i^2&ᛔ?셲&Ioe\y*|muN;bbB aԌwTO@:"MVD6äM8xfW][%yef(tyB@2XFuT)'pf4.5m-e޳Xfֹ6QGz3`3yG7"ɕ:J*eyȻfeTϔ|x (qqf˩ncx` :!E11uUi+ZtJa_ iφRxǥ!po5gNXg+L [Ro5o/z&YA2dm,$f6<քoXFa*bIV- oFu| =q#aЄoŃ۲@]n3lLZ6F.+V+escOKb;2̾ 胁D,Em$n HA!I-H@(6j+@ (vd1"!Mtdu (hv 9b*_1Z$gb9VS*"f. 4Õ@a~U |{[?UN鬈@X  Ǫk͢A{5> rHUZ1JVťӲjӔy̥OZbVIU< N5~k}>Q9LfŠ\RIhjSܗJw-՞z-{\ΩTR5'|x|c>|֣|l=EF̮~XdA|fʙ(17gٹurv^\ :!邯DS+(sM"Ibs i`[$SwTTy/U{Za%Kwrg[YN6XݬL$=>i72>kBѩǍl*l^>I2_vAW4/:6%RA-]q~}B'ц)7^_KRl 6cတħ>60<6biČزQw)2)/Dg+UM#>H94$a)[Dy>HkqaM_7+6/Vl'CIm:9aLi:fI3rX]>l?_Y endstream endobj 36 0 obj <> stream xڽZˮ +z ("@2@v.k63M~?HIl[nm<|_`QZn/x˷3 Xoř5|\'U(JS^7`~~9i36A}ד.x63QSΤoU1@Եq54Q@VԚTi[-گR?.sܴ'\7K b䏙ŷ/^HTS4B-Z͔IᵥHJ`YGYsbPmi_Y3 0s ZPa@*dZma,cxo<Tش; ȨTQNh4juD F$ 1ncퟥl‰V]F+'pb-]<*2[f35IsĶ!gUG~e:C'IQV_EpSMz'.X0JIDO$G/(@Ψ4FM5>Twޔvrr 5̼LIud.RauЬ]vK i =qKڷ3%N2jTJǻϤG "!_9!ޔ!G&be"_2Bk̨^1un+'밄oX&TI:qh} @F)2.([iId?00a].= nEsāNeɇ5ܶ_4[8M^7J%R%`Gt\\cܰ[-#*w5^Qk V* 0":,'Vem7fGXnlE3x>l~bH769dqa{j(>#Ν ]ߌ}/@MJ$'< zv9ueCS%Du{.z$f"Iq$Λbk&bU:UExkb= ]v=RKtB̺ЖQ nqϷ ƫ@؂ 6µ2a^C waRrtcIM$iQjoCnח=PPNMCܡ>@@2_8tS6.Q~XڌnIkH --#6TKQr]ڢ8Wr*d,Ki>2{#<֫J QV^pI6"O^KV* fjE_m ;R8No"'3t‚>)g8j39J6}m`Yݭuk}5a^7¤7r4DVZ)D!EWB&DzTvNI5^7zL WQvdl.Et6XyR!|L@{/ZqN>w#xF4MdNN$YkW*V6[%D΢sYrnӝ0zF"l20xfS,7vllʚ+plÉflj U{6c$~$꫊'Narx͏$mXmLף؉G/n^aј:"Aj~! 
J)Ql 9ш6AGڱV-N[Nc' A8 zo긽lBZJ&4Cm\shRͺF|sW&Š> >t8)QgNc&]\'jyģԬ@UchX5#'EG[VYU' c;1Mi?>n m%)eزszwЃZ[\=(>y&  eQbЙ!ښseP;&P*:؞6}7] 649wpC2E3 t^ϼא&X%Y1`]d mp ށ :ȬTeu 9 endstream endobj 40 0 obj <> stream x,-W-1֞p0Ӈo6)ր1S*I6,SA$Z_%Y}M*DQ撿\JVLCAE?_Eљ.;<vjjySI2Kox;KVؕ6 DC)"&uM?l < K І~MMOzHbڕ~U|s+®?ks4Se`]RCkniLW,*K]Ν>kki#sH|_0Tu&ZM˻"k{)t0eBKlBmn\ɳ6`)?h23h_Z+z$ZN v$sF9c(e xN:QD)UduEz"sթt] څ 2L<1NJzkg{kﵒJtuEON|lBlκU.؝qUA̧c&?1Ƙk2+:ĹlǠܣF] ' V;BSUǠfZ "6#K +08cCQe;Y^XXdn~  CMrr[*[22!7&㗙C${_"?^屎8q&D̩.9WcD^嘑q^QE5Oٛe6y0w֟=E"V؜u\9J| ,1RkDݭq@:e ZtjZv7{P<-Uphxn_s/ѐcedٌ 1˝&T~䘾A`h}Dkw >Yq@j|P^}04 9/yb>&c#4ĺli8-Uqn(vL3E y 2ͬalRR8lJ٨xtBK^\$1a\⠿|'4%m"i׾&QYVvHP¹#`zidK~#/E5s .W(Br,~_ I 7V嫌a*U[ޤB_p}NXeuKٹ4bmtY&z~될e܂ fAj"7`Oxh^ѐ$`@m:VFޛ d;slÖhAHA]hă^^$$:Bd%{iTEId QgTSok(~j!L=h4m6AۄvqK^gW5YBz\=+q.- A`liTb*K_L>9);WisjH1~u0M;hg)9mb̌qyxLpkTǽ4*/y@%gUdqy\N&X&xģ!}^mז͊ ؑwx" OvPҬr2 5:Zq8nǰÉia"J[%)ZnWpr{_\eWxN0G@Wj# ?PnȬ\ej6m=Cjc3 {k |Oƛ M.Յk9  Rqc^DaZ*>Ƞʷ5뀂lqzXJ?ᵐaE)Sse KٕtzU:w/Hp 5iǡ`i\Q,W4+E1L'iRJemZ{UݬCy̏|G<ɹ]]n .o%^.u:szNRQV:58A̓' endstream endobj 45 0 obj <> stream xڽZn,7WX4z ]f1 %U[%Q(!w2' ?,ӧxd*)i\8Ŭ6Q]wy3%×oxYG_fXIu*.[Gq^_ l+K `1ľ?hO>oG f&Z1f!TBqNDM!r}7Uq ~;|Z3;j7&.]pMR597OkƍB&FBxٟ7.,D tD Γ5Ufmޮ;zkf$wm]끔XKh/ֵ5|q2Yic7Ea7}mNcpr# 1]eDgaCNY.>92U95U<#gߨ66YZ^kGa$,V4)0_a̠kJTT6A^E@ƎvA Мzd݁bwỪ@OM3P( 0 zx>I8hJKV;V6\jIK[`?YY3wY  O-CˠރV.nj$7IoJ Lfk}`ߕ_D"p1uʏedf:PʸE |jBr\4-^cYS>_kӃ\g0M{6bmbwF׏qkG>v{gq'ܥ:lcR.{jҽ7zaoqXiS0,QB @a="`Ty=޴F qm)ذ)O7\Re3rggʕʙQ}Nk#keG[&PedH',-?0, HCfdYba$Meоlt9p”`)6RAԑJ#I O'2 #D$0`­'? l HZQB7 (RDYJ$-SaٌGy*îmZI%HK5-I4BKۮ|Fs+E|k=4eyAIYWHI%%MćJ )}P! 
(`S įg%bdl5*_u5撢ϓD%:-1J ȾrግVqвw>1D<P@f8yt -Y>;sNrm)^VESsʍRR v@g86N%~e5(k!>le*W7m`o:U-,A_KE!J.r*;AiEeA0牴AD҃mӆ&# ?d!2ˮl7L 8rġ/q`/fWT8oJ =+&qdmṇtӲBi +ܜh]Q2%>G/eq1fNJYY/R<`0LUh+pf(úMۗ 2p&?Xm:[JrK&P3L nPU,q;hi7Oklv:5-eK㭥yZ Y wxpc]̎ OlV8$Ӄ=pVN~P2%]w> stream x$-W,E L`>ٛfq/K$U͚VI$ER| 6M`CCv~D/we`N}mZBT}y=F ?lyF463}k 鱼&yݫq!;P߉f%!yc UjJwz"- xaF$:ಝK&Cz *oq  MBgQ%ot_7xI ɂUHfsزyny$M m_lALJ4)C4sK5(m$|_J";3ԇҿmQ*3` je//geSBQN Qi]JP+S^޷cBO4&~Ba2JՈgut\C#GNduy#1Nz#R|_u^βTWջsRVquol)1Uu~yK'Ql;tf y&=WN[ELGLu 2Ͱ$UeL[Y*^7S)LwPM"k~*B|>=|~ -kxg]s*G\jYf*LG>Ƶs8>x  0*jCVmt/pQ'UL77T;!9z Jw8ϊQEO\0Ny+ηЧ < *9eJGT_r'_^%$t,6y;E$dW k 6(Vo-x66dRV@1R#0 YQܸS~Iup&nb,drLUْU=vi. Ű BK~ /zq5s!i>~$0 })p~|@T|ԋI%X fy H;"ȾB|*QtL+R@*J'^}LJze71i'dq prҠ}Z"Pb' WGL:NR%w)2IS *D1AP+'U =P+~ZD+%EΪu"PI geQ1%6BH(o׾pt5AJSLza*AX$"KgȽ` Dl!Հ~N ^@yV*{:8geB޳{FNEHOO%Up>Ai9}+[ڟS5!pnrrn\ 埞2^Gb zͥ$QNΉyٜH!Wb1e$& m$s-ّkfܥ\2S,+Zkč}W]4fGa/LvR/h DY+33dk1g| xLdx ,з?cni<0;[Nš6CvOq9ӭ1 W QA"!dޙ7j?d:fǪ2N͐I/ ; '0ݠj$B~Dڡjܓ()7uuї[Ygtyj4.Jv"vxn ;*5,Ǽ̪~ĬEnrpHpf(Q2|j=Ec 51jі.MJL}o.3RRBS)ʎZJUքVȣ󯻖=o+#ГchhLUQx z9hi!oLs:Ⱶ ڗ>: x8ה]bpG_Dkux.aH+|H œ-.>Cdq5A/S}Ru*V.595ᄉ;S|3T> stream xXɎ6+fX >Hm9\2K~?EVq,yk4 KdX&@(4+>meJډ2$'v% k?gDEtSepaFepV|?//ߜH2BrЭ(vJ^);Y /stW~:;BKg"#s@ x{OL0ѽݣ6sY-uֹh6-NsiVnK3uݾ8klZN#AXa2)yevQ:|`ek7kq-xP͡#;՝x#ʟv;q3~sJ{tא2S#5XP#DT0w{ZH۽9 k} IfGC;Jr" =]joFJb03=#rVxDuJS;&B7(YQԩVSrc,seZ%PƁ&c< ,Q*kK @Laro4 X4'RVB4,Vi%.׎ 䝉VUv>dFzs9"]R> stream xX6 y<:pz=7MH^' %S}UVYuB_|S١K!QmW_zMp:T'JuJ׏w*D2c$tV OǟQelH訰&/Ƹd 1Dy7T? vނ1X?-,%-@F І%q-v2mas5ذA /NxSf#$xz.L [@xfzF}z~Bs'j0_FGgZm`oi &z3OrDgIeiCYXz˙mVLg.AO;"<݈8G&0}XkS.XK'* 8 M /2 ׃3v#d>u7Ѕh hj:$G=~zA>by99hT Gd;anGӮVy' y٧89S$MtsI-m%:'f1D(I84g0jbW~ōRL;X^Yhp.54;zlĦ$1fP-kʍk I<Vݎb& Qs4IKqoP* }|smh) ^R Tۦ_z'-8eԜLIs1 SV;"{(6G^ a1oF6DHƭ ǵCՏ-q3mAӆgnumb]mn t^'ӄNk793y}4WǘeF TJn_|, cESlS=b8?w*owjplkz~)0Dq KkZt@A­IöZrWY` q8LZ=[#Q.gPGG碎MXl |S%O endstream endobj 59 0 obj <> stream xX7|1M]B_x2? 
k3_ט=]=6.L=h{|˳ח.&X׿Lb^o_o^ϭ4 endstream endobj 62 0 obj <> stream xY͎6 )VEJ0C6@oiV0ޙɥ9$~)׳ aYOH>M0iGǏO,bD*i}Pa:*hgszJY a]opDj5sf6~|Ϩ8y~XQhz8uڜpz:"815 ?! 2-Ѱ+/er<[oU6\)+f2zMFKW:ޭӈ˰"_h6"c:3T&حrN&D#QS&2WaCA|Li3)- ]wPr~r]z9s#U@N< %{I4 v}FXeq8k#g"L8yeF:%+OsZQeT 2Teq5kσM0p~p m9ՊKf̃=#dO2`[鹵q*  tG:;'HzFUόr^ץD\O 0hݟ :-B9dNf,f\ScB۳ pErekaϠbǢH N<{l٧䌇w쟭 (Aaf?Cܪ a+/II6RmkS.n\/rxuM}FNV_|nZp{kl]Ŧd#X0Q)fpS55\[ nѪ\ńnnIJ+7QKYT4Ok$[[Olٛ L5[/7;<T!|"5Ok | 5{݋gO t kdj0* lW-NJW:ʯV*Rrq)] {Wq-WZco4GH;!8)Ì2u癡 סt)W@->oAiߔ5y[#s*ʂ[i""u6+''%v6td/k0D.pCԺ6L^6] 6+ ^sn+y`t F-o?DuB u1#t1-çiCnMx3ɭ(]xWdjd/dPE#|> stream xXO: )b,۲왝̃n@o Z.\G8I,´',,˲"MPAJ|o}Y,+C(h%*+#iIxZY^{==rM t9#I&qBg/^Bb;/:2;g (|ub2L怋miY!3 Ċtah:MRPT<Z(eH)}mq/"Z::- /q2mG͑piiU S4fLQҲ+s: rۢT2_;~SnJ`ޝƌ^ˠ$K0D5v@iPotaQI3KOfv?!೩T{T|q#5YgyKxsc2+ ښ}=\cabbNgxӢ PV4鈉8 F;gSa"J"d{_ BYB㳶;_lBW@}zc W`ɜz}Sskvki"AnpOJfaSP.-<>#N}Zi [8Ձ@vȼ6Ф&鬦t>ia7l779-B"ЋK_'Q3~11>bn際'G>o4Mڻ`m\HS_!*BM RL,]A򬛸*G8)Ұ봱wL1qL`]d(ST`ԧ8Q!K:HCBQǗ cf12PzDLv&8DɄ0ypEHMƷC6 e=E[iړt8nr.,JcG?uNYqt橠|:WZQj^PDGDž\K7g(yfa %b/0Ybڋ\uOV<TM l9(|l^qMS^'եE ǭ[V77b+/\{`y>hj>Ebj[@Ek endstream endobj 70 0 obj <> stream xYKo$'W 4!RRn-az{YK~ (hf#JWmR59_9_Ə'!NNh 8(!LuƂp_7J܅ بF+Փr^hpy`\De jzLVX~9-r{/>IPJy?/NExJY-,>0DA x)[r%D=%mA E)E^ KhB*vcYiF϶ kb5Æ2[U:&j yKP:M܇NPlTql[jڛw:^LԏT Pe*s5_Uykw_(^ ]k&HX6!C8P]JF"1i3z!qqKnvF|x(7IaSҠZA9 'ӱQ53c>L:'E v/Y8QTu6ŵq˅fW;sՙ#_}S Ks83GuؔjUK"c̾!V=yGN=JVuyAcQTgdfߢ))SxVg)q"LϾ9dYN>)uvUs H|:^qb垣EIӭpDH5>[l :wʇ]3l4eO e;.YA<#PIFú5NWϩr[1e#&T3C̣]@I҄=Aş ֓PE5=2OMux}J<ۈvj.Zqx.S%)VomůJJNjkaԱE.l3 #scCF8[SX6HĺseojAS3@K]p)o_fj&{irԌ[c'%\Ⱥ\K|\B' +bOj-'GdF˱j1xTPB> stream xZK#7W"J 0| dCmf.ԓ*=3W$# , wzDׯ^J:r[U! 
h5hߗђ S{'i:φB;b!y< +щ4j׈SƊ{0wFIۍFDZc,/[^j/$0{'NVeH!F^BJjq=J&,co^=+\hXMNǢt}c2_VK;!C*@hv n;i-0QW[֩%_Z0!*AC4JWeC) rZ)٤SW&_7eqܞ$>eћ˞dP4e::t[b{u[UeL["Wq=譞jVډ1$y_b؋[Jк8aFeI@E&}c @$@Yg'붜웙 CX5sVm2Zs1RDZ* {4>vG;qɽjhӞmnۘw6 䰬_>/{ X$ї=FbeV=o^ȆHiλ _}/#]^AmUPF%"l" g^v$Ec8~w'ᐊj:kurF35CrZ/7M̮ ųstE"PGz}禾ʴ4]6薔 f)MD~ CUsWAĝ"^Y؁Y{ߜ4>Y2,[i OO29؀l]!t OmӆX(I./ťĪ\ Cfom6I$mN(Gψ'hH5+:ۚt!mM9p?Րd2θcvFۇ܍>SvPNX26S|}&*eLPczS?ݖ endstream endobj 78 0 obj <> stream xڕT +Rġn*V0쩇6V^k?goM$W_ ͐, ƜeJ6z2 "r@wlwBu=Ōt*#bQB( \0Q򪶲=Q\q6w 1 )Ú71&rNrTsan4IQS!OsݱG}t/d1gsagYDZɝzJd}=Z':ISWU(ynܲCy=tТ1DZm1&JQZD~#Dj\_&?["ƒRNLe$ {J> stream x}Qk0y$Z[Akú1({z6JlKrm/;N;b?˦H#U-J ka1Nʺ2☷?:?^}%?rV#%T->pHϧ!էN(*'YŞ6qjFҾmpjS s+$XZ _9vUKfst⇚InjwP;12ν`,2%G0₸RȆ\/U~gKy}zCG Śҙ#HҐ3fH>R0j%AH.>wkq}ſ3)TЛ}EZE/*-Shu}mZ2" endstream endobj 181 0 obj <> stream x]ok0)c"vƴ 5 ѲҲ5`=;(ץFi*I a'̀ wltdw7Fm.߶U]~߲q^A߭wƄC Y <З$Im$-l5hf: ;(Ϧ%,)L~ߵ endstream endobj 182 0 obj <> stream x]Mk@=gL"$CCu qwY_1)tAygy7+RɁ_V HU[h+R9~j)-:n7;r;`n}*ҫ\1!n yuJh$cT&ˮWxc+U^.Yj4yNu>pěNʒb:Rյ?ܢ:Ԟ)RL!!R퉶Ha(^r9ў ʑbis@w_;yZ@DWيt8'eHʈᚂb0 S+ ]hpcp|Rm*|~c: endstream endobj 183 0 obj <> stream xڍMk0:vYJ4dݲIa6k9Ր4y=uySlҷ T7Uo/ {3Y`U]Rtu/_Űqcs}Q*s01<=|^sɛS˶ck{'{2}ݜkr@ru݇f`|a8 me]Qhfٱnea{;Eo{R ݨ$!Q*IypͨHH%2RU <'5MG91E"$1))@y)<_~9#,@nZgwQ+*H~FwR^e|\ @R*Cl̐n<&O$t$$8IIh".zȆHHL$s$#;\D "#]D\v~H\*&r)+L͉Pvp}I$r]9衎#)y:0 cuU7n?[,Ex[ŗZZ/$Y: ow2x,dY('H` Ηqw #vuc)޵x ?+t endstream endobj 184 0 obj <> stream x}j@}L)AW B = e36 ?'g$K3f^`XZVwY 3~EW*NKRvmo"{/23]!yqRܙ4? 
L6="1{;F>ź+l=[-Qt s8p} *R,ڌ'@WwU5ԨF> stream x]j0O1-eg ®"Hk[h5 Q}5#{h / Yռ^#JɇUn3|5ZmBq#l(>huUS?`WFC{0gS`sƗe Ry۩Fc~C5@%KRq\ hjыy$H.O dYnk endstream endobj 186 0 obj <> stream x]Pj0 +tIa=H.iٮ琿_&@OzE=($a~bp>EOR8XcG\_e|f]= ʴ<{4Uv$MKQGaC> stream xڝZwXT׶a- vX,PX-R,\=,B]=~!k=,BdXckhVWn_T>[C,U^>*Tdy>^ޡ֖V- |Ep/bb6qkiwPD$"FĊXK[d"#LEhH&2 wbCq1#fHl,%-6K}ŦbN<@,ŃrbF>3FC04DVMGs\9E(Tt2NK",\.ZnD?C/~ldo.!)#+!2Uy疞 ƁP/u/۹wIIgfPizU}S}ԯOc?7]q3#~2kvgy4l><9 8h頄AyjRT^!Q6j+q D+`웳(4'?deXPȄxg!d`sF6vԂUN%5<6,6g8{:O&'1!2@V_VYs \[ڄqV6 զ;a831!zxwBYl#vߡPCk_07xhcV+|!W 2;Y0"t[!I3AOA7\hc_G] ,?gT\uj~$*Ɩ(B~,$$#3C7cnn2K2J\8HۜfXzl%}+]sIvKmzWaZRD a,5 mЌxm?z"G+PZw i'BKcmqDPH=0[H,]g9QqH`(^:2siT,w',6)f[O=1Rp?'}H0 4wǖ&Љ u<-UB# ˡ,Gbi7ebwXݝ/xiRCf`2tttggéG։qV[Nxn@oR }iQދl~/bwPxIEV?@8ً5`ct'"; "}ҝwl" *BtnUN/°0 =ļsWIJhձrb A .O9HGv( 6;>\b$ _힔kijpX@᠙$=ݣ<\̮ؖu НL5?B34v<h"{ RgE]4#L% 1Qf~;7.j&MP͋*ma5|"t&%^B:C(ٻfc 4"&}9붅KHNpo{$ȄE~mAD&br;YA͐Jdk-B\ۨ:VfGnEF sZoYo$'l"aq$h-!.Lh:V1a|z~=$=Bl? =4Y-pi3#ܥU0 %T*` <w-I&u(2 jbgP!ԡLԣ̲L**: sJ"dN- ӑY-"&62g"Z5t6Pz8tc,OV컿uj]4A/u69쵨>'UY[Wo1XabVg|J{B%C9pD@$;"`45&~!ᙌjR+2ʩG7 NtEjw;p(;H|;cá\#|E7eQiFn^~EWcڬO`nfy7j_f4:6Jr٢ nhxaaN]ޓ䳜y02mC7=Y Ei3mհps)}5# aw$AD\\C,d'X,d  .˯#e}k?V]nоT0>+iծcHB? 3ued7"ڕd 0i+~~.-Ǥewf=-$&:K'xR1{ e&1+:,*OGK#11%׼G3ɬN0q%y֌* UDCVc72RA>揥 =x;!IF _ԍf@9R NPap`+ U'(D-L\3 C%.ژ\yYAn;D'w=0eXHba~(#fP"DȽi 8c DGEt檔{֞=2:96Q9 cX;V0/H,ΓdJtT! qeHd7ɐO?_wA~CukDt^rb`=١{eٱZ]DG(·mF b"^voyt첳tyĵ b.6#~xE*ӵIŷf_tj{!oɈ̛~@鹙{6(\Y~7N,yݣpC>z:eeE +>#6=):C_~V[yl1m$"8KXp1b0O;kiwr5%A nRөPG'C "ԣB u(3iv LAJ~ b:|F2`eSgX"뤐 0ٴ]?@zKqD>e6 ށ,%b1`Gͳ`B:1/eO>klFqaNjah+7_uJ٬}{sԕk̏# U6}qk{eYv&n.B$,߉ qFYoZvg9`M XM11G(D"3l]Qa}v>^MQ~ىژR=OeN#2 n*rU djcxZ-]Z6OAc@_50d%iiΝ;_@i=5ұ\ge
    ƺrwG 3^#.- H?%`hHHjlb\wM> G0AUg|ݻ#kf(Otٿ'>0|Q:NӚ +x &T)ߩcx!}MeGx.reϦFMfo g`eFBisZ8cnsI+byn&BVSڭGcc+aj2akI3sr YU{}uLw~ك>᝶@mr)"q4N»gFr U#߸]9{ }E*4ٝ(GYv$=4|=rEp0i/(jͤ!,HV'jv$q Mq:BB SBQ(7tWcC1&2Ůrr{SGUbCKdѮtk2t`qy$3nC6Lf|Y.o:/5#8Zqw-r 0ΧvgV7O`Q6hG5C &Ir2 X~Adwku6[nT{^~!nl%-A¯)J&Heߛ}FtO3ԘZ#wowЧɸw7< c*KrWT=N>k=AdSz6Q^Ekח<ѿ}v_p;]u sP&r0f#ct@_e-m020քDb'[/?mG<&mR?q&D7k]-E\ˈq] da6^Zz=-\'4]~ endstream endobj 190 0 obj <> stream xڛ܀`qi `9 6 endstream endobj 192 0 obj <> stream xUW \֟3f T]>ĵZA\AB v"AE w-[;ֽZkZ}VZ}goΜ9 ӿ?#H>Ama66j^|L큯8\ #bv`ķ#sY3X;i,ulH7 GFAFF.:01Fp ϨgOkLJi}µq(&wn6 иpu-SUBõIdu|:@U%Sˍm`~VcH҄yIVϏj'Rw|¦HA6y>G,)>ګn4ⓒ:!!#2ʠ檉 95_~S; N L7A=3Q2Ø7T~D&a%Hb/q8JA! C.(墿(R]f62$1~nIGIk?7+ #l8{Uvav? X>gۯϷz20 3Cg׏fYu-|KЙ׼RLozSX(6e  LcT/v`ǺrCps)T 5>ly ĕH#*q[+hcCJ.bc>І Ϫ=-;iWpp 0  @>Y!ـJ5ve\;yg\Aiy7a4UVwEƒ$yJ2oSPp ?Y":qP6ȒzC YQL[Xtxa dr&@Iq'M;[rL8zX6E^3 WdYUgMQުo+EO'{^G4m{<+a&h 11]Zt_< ~ bQZ sbq32BlpN=& Lҟ6C˿8Td@N- u9uE3Ls()$J ^v+vU;V&(l,vV|ЙhTa\K^6.D oƍEġ\g(?n?\% ',KۺŴO8؎!'ijMU JϚ ӄ`uœ92\u*S+e|L)N}ѿqNI#KŅ0MHZȏI(%\c^Yt5CM& s3[RD߳0ޒgxUDPÞ76-乻 6h֯9,w!ZBXgڢQEĂ|c#A5c$7LВ-lv[{bO=^.(=b1tM\[sY6QQ9)rkF~6Sڪ.tK3& j2qt^:A8I"C c~aS2yd#t+wFބq*oun\2]-ȷKd+x?[\2\! V4+:=X߸ʃ){1qQ>0Ȧd, q= Nw5Ki5 -~-#fKC'q ]{!+тgr_brrCUA4j+53uˍZ>]^".:cs`ٲ7ꚏjLֵhuLa}1>Ew j$m*jd{ڴ}yqڒc ָ UW7ͅ)~Y [e9;0}ޜ ڴv3"X9K{$_ٖ/_* iTq=\!판z;ebEoD~]7g<^RԕV|Iv;@j ^}274+ȚUW0!2{+7r1d#615)ZD&,]"AN^ 0%Y `,'a8XLI3wa2)(euU%| OW(Z 2Ӷjp*Yk/m+*Vϛ^j7(5"FHX`A}E%G'hVʣ{ޕړJݶ=/2A-Rx+ӶBw$'Ɔ?浊ύX Ԋ7W! 
[un$D jRݕ4̄aR۩"إ@ f}v &ś?~IW(uvKGeV¬8`Yz*{@{OAsB.8+ت7x„Fq}|eoµS5Bf٩FKE%fū}}J3ˇSƭ +ï#kWBt5J~0X*p wu"i2W8HxD K l/&~ݴb oxyÕ􅌜e'e0n'O!T.kaM'ԏcp&.6!(;Q޿Zo ]2Y+PW䴿E3/nkď c k}FdPBHh~~?yeKB+Ȁw+Y>D"Љ0^' endstream endobj 194 0 obj <> stream xke+:`A} endstream endobj 196 0 obj <> stream x]X TTG~u->ԉv7 **;:*!WA7Htb0.Ј`DpAE7p%E'VgyΫzuun=QDQIΈ4ΉO\aBо`ZDN z^2c]T+oy+eۏha_j;@PMO3ŧ-I^u԰#$^5/HNLZwGhܤx}hailHGƧ'ҒcAæ lGoُe ւR B7A-t{A=Ypz ߄>V zCa0Hp> Wa0Zp{(93;^oW0t1?lNg@_X ;0H8N~gGltxv ˆ(Y3'xV7 )Vz IQM/A؇&Bͷ%`}OШ^[<>+ŔNNYř%p.pf 1}>I1%hDFLhx:.1ۅ/=(hO8b4 =KYqA>Y'` DM "C%-= e]c7aVh#gc.E̽v< f9;86J5W"hűrDRb>dJn} l 2K7%2ifol1"W.+>p$4,>nV],Lt;kfQJlf`O04c&nݳd3ygDTG޲YdLoHfr͂=HHt6Zj+ގZzaB⚘%a& 0wb#ҹ/MN[(ҜYRܾ` l Ȗ+'GWp(O<Cg~\$H7z{fi_Q~Eoc~m 0zәpÃO5-+j\*9haማID#n L|cn(imevűPOXZ;w"}2=aٻ.<5$#'h3`Qf^3uOpcVt̔zj5GT~3,`>=]­h1^0m%c^π-fV~K,k*P2+xR6Ҫ&1{Gs]ƑJ'^ 0GNbv,|El OF\ׄ+8[9(Le< [Y=OumNM/6Z.Zjbbx^Ľ|.4ZYkI75`C} 'pR -×HZ ѝkѷZE\AoNe4u`'͏֣Ld`_= 6nXx_-9{I f)6lÒ?Im}YB,Ҁ'D$0~"c&6V˦rJK8Z7llh_tfzx +! YϡUL8zGEhmڬSQyivoP`u |<°o,$\~0*2K jM ~lgQ%7-;`-ɘ`~j:$qrӎ2'+z7-Mv3' Cݳgӕ wɸ,̦4O f"T2njbE6)u٦yz!}jѹQlx\kտXOmL[BNjvȝI*6m_Vg>.ˬZK\mY*K }Rՙ**na9dJ̤ MXbt kJ1FpUJLxoүI0^ Au3 4-.y< 橊+*j<82}[g ie":z}T.ZDǗ3W֕dcq24T!ȬyWgS;nmӔrcl*hw38^hq^\-NЙۨ꧈=hkP=Ng; e&h5g~8ܗQm=s 'ETqdpBP`Xlx~p f{%Jj6 ɛ&aaŃvԋ7r(~n.U^ ~cۋRZaM*]gteN`;ػEsRyl0bei xg UWHW~:Nȁi}T-0O3gw&6M/mT,8{ ۢc7n xFM՚S;>:Aю<[kny<;Ť+APĸ(Y?1iQu:dbP#n5@Pj<_Բg4m[7Ic9m`: plcF_I",,s[뽹ߢW';w™Gj8p*d-%U/ WcEQ"fWo!J )uo&/T".;[o9ڳZϜp]Ϗ IrIm} dRj륛:$ynk@sϕ⮧ К_*%p";$PR&Yow ڱpZ5&Pg0R;4 ԱY7yB.83q| l'~9Xw:ГmdCS MWRXH"ǥD0!lPQ xϺxnS`4 (EɥEҾ%ez T6\ 3J|3 A'mvہic- ʥ88,WɒsU+hA:#,5Fڶ׶6`۵ֶkm ҂ endstream endobj 197 0 obj <> stream x{h7|/ endstream endobj 199 0 obj <> stream xڍyyXSwbOBjCZk*Wm-cyLPT "@AAAjZkZk;ߎ}͓᜵״{:RX,m*7}B>~`p}0HlN$b^$|CP" KKԝ7^4hH$M{B.uCm?쇉bO;gO&@NpoGxPSܐ"3}4^E}43/ '-}Zs.黼ھz-SVU?.EE=U8D#ǿ$C%\^ɟ)R! 
zSAT>uj^>^M3{A>8I=ѫ~ uXyyo>>}* J?鷮_ӏ c82A;8^迸 ;%J!;-f.@2l_QEU_>/KGU ?a"kןdcu?tc e OZ:u]fGw+kxzn{\qB%Q&6tb9SժK4e{8y",ZU |D "VhX)H%3օ~*nOTd9ǔt) dt,v*k)9okP_q+4iv `(^`<oa-{p a.!xlk]d4p㱄%wB1Aǚ4hJ5TA f#/FK^v/ 08WJNE\_qQ yRݚZƪdePu̪tSV!S(̖7Z~l7wY{_O07<&0M)OkW{R޸3'-}'7u}o G7\|$0SxjBJ›5O?5/Pz^,ݫ7 шNRr܆:oNI`V3o,5zqk*܏/B8y#Atڈ$D x=.& `\!!Nlԉa W0MR< a_p#0mϧm|+`́ow9dcGac3ü?DW@YnRvWv?o&[= Br~z*aB-Vf&  :uld0Wn4hco}!Qx 82T탉Gm ~@k:%S]f>%Ο, Y^oΈa5a8W`9ǦwhֆʛKN|ɜt|Y矀A a¤E2|9ZeΗNAmnWLD w@uзAB\ 2)y:=q_G |\Xw_-0[a}0^-nJJmŸaoQ۰>ŵ0╾Z/O.i.2o/T6pu*i s&x>>9}i82|q0MxX2/<&sOAa8M2mn]t-ӎMP5.Ʀ`נX/ل οS\>#G ;wq/vحunܮ a(|x"8 cf%K~ʟc}DJa7lāP4B%+, 0&lVop Q DOax5q X#&.B&@؄onl x7B "X^ZgYYdqfB&@tntv /ɸ/kuɠ79E%EV7R؁0DPBߺEtdrb ARb"=k(3*!~{ͻL ZP8NSnc",O.D܄:78X0HǨ\_t!gMu*x-|z AVmQD8rN[+ ^SsV}mT', J 8`K& W4\h_ƗZw }PE!k )%MY|CBs 3CU{×ON&:X"(zA9Zy",pE~1u;{eCg$驋|S6gGEF4ݔ_`^D7~4[~^Ӳ7WUkqtQ gŒwHbG UkU8a.t7ؑR%;3G7LE'ۦqb܌J ?ٖo(7c 3K8vRF9jUOܯ<|f ^(Y| # 3g$R30$mBrd6#k' ?|44 ^( O|=d;99Tv!pĽ@7ș^n95|Q觼5/<6)xdz.>˒:k$^{w!ZNd*ѽTcy{<@4} ӅA gT~4ʻv9VEefJL-E=ׯ 5 MB; V|0_SԌ_a'RˌEiE挘8# oBKiQMOkKTTLh@%3 O_ Lmdy`V_i*#{v"q;7fwM @af]]hmQP=/l@_f*uPYTmmY 1x['|zͻ^?=-b2lyiyMSvdV]{Nþ6SP,-W$F&G>gD{_=`N['Kp Jݺ-*F2ta7'/j3kؓ&B|D*]d4 8-;P`Gj[xeGVR` nHR!G(I[RpARbg]`-GtNE&[ BҌ;CoE5RFwtNQU .6nXmK]l(HΰE}͵~o';҃G`670uulAmq$.eXm Sw`}\EN6|a΄+bd25xP*!4L?j*Kl+|EE굮gqĦ SY_W\5!W )>a 6wrCf*3ZvgE" d0X^=OqT!56^l׆CQ]r5]tx.br)u>{C I:]' NZL2p4x䁈d hQ]^6O;^;*uKײZ})l^epD+(9ȶ7uOc ?s/PUQAAE~Eb!k.J(Yfg=t՚ܺZF "d1 CPvfOv\ MF(a_.StQ*v5>ݘC~rYT4%%2YIJ8do}5 cRR84k4o8+1tB}PRS/^==+`qB| uX(KRٱynnNˢ| ?ajot]}7JY6$Gכx[~T\ʆlAsyU/#$36n>'_pgN{- bQZ{N15U%#1;*N BtL*iT8eN9|.D00== R<0Q;$L?Dy淅$; ^[')l]w-s)jY#L'%}L)&%]kUazq7DӦX(?0]2'w ngOlZ)inDpu}ZxιUK@V[[Vib>dr7CaUq?a3j^Xv))m(KFNv_E h%k1Gm ΃\1ZK=CFjk>ӔzqXc.8{)ba2|:}Kpb."-IIɧ/GbAzG"ژGS,J6ZX6k7ф{5 |pT<[,O7`*"Lu,OmJAmYp="rwwK%݋@t!u!70oSx1\ܤx'؞4DB;n}ۯ{K^`m>3:Ud.qDžlSI^ےDJ BCQ$oTV#W{KP貞F7BEaWQeYgnF7߆kFCoW<WP Z#cpq.[L9`SKQt\/)0{ BT4UivkvW4bu #mpH M?clh&o[!Q9]~?H4+}ĀY[`xcNyoc{ֽRs'}ٸ삝i9Cϳҋ2kʦئxЧx|_.W-GM>*'.*jM~~sD{^ 
k`Yj{'Xep~ͷ*Ȕ$eVHx5IDG}kŘ-稙)Jٔ zr̅վlTr^~Qv??)Į ŝa`/t1!@aw}=[Oڿ[JOY#% ZBnӞk[Z2dZ57(u = ǣ;SXuRiUYV5Q\=ivdeMp򓉝.?n62 י-T܅S5|dƼ&=Rm[3b=bck$/G&*LөaE^?r Rޖ\+%#sQAa5KgܓPVU5EsW"P.äx$Z}K7M& "i Jf*R`MC6@fRjrumFRvymiq%mb3KJsٶ a/l QoXถ6cbN1U*~ņjtA˞=+;JˍbnPeLׄ^?dFQ</ P`Dh=n#B45w$H;^.NU{Q mK{Hyq7hx"jEI(Uڑ_P&Y}w{Uͱw'dܺ endstream endobj 201 0 obj <> stream xk``d.uƬoS< lȁUL>NZ%P?.8)a@J pE7ҁa(` endstream endobj 203 0 obj <> stream xeViXdLܻիbU܊ZjBVPlEB K lA놴uQZoz ^8Ϝ/s} !AjiتLժhzRuxb\$^ƎaZ|\2"c㒣ҧrK"JJU=S|e*[XVJ:%}NlZ1=RMɈFm _8yr'p?x |bH(8=!A"b8C Ą/!!(9+By^T&&d{Mk=+32G*5۰ab){qЧ [}OR߫QPSPRq +D}h)JpIE@K0Eی/NywMN=Li2)ݚB<91O*uig]C[YoW1 8vr&p:&:]UvdE*4[Ԓ;mcY3upކ0կsZR a*&xHr=I ϔA"i([I%?_ចLӔX꒦fZ xhWya kbl&Fl| c`V/~%PQ^XYle>xzukzI#J 4hA~ V/-۞<#AR`rsp(i 12`n18D6[x,{ F72dХj[+$zhɔPZˊ;vptG~'} D`@4T/gy[P.>̥F鞉4c{AdBCBu: N![an^0 wD54X#M.XsF5:!U%Cy8!<<}f -_mrh82qT59 Ql5cLCV6-dĎB%$Hd>۝)i ^0,y6l ]LҙTڐ_f0 #)r#СQՇ9<O!گγpU-l Zɼα+n ,7IoyT(2:8& ;h1^JeT]3a͡Xx<} ňO{* 3GNfHʰ\5k5]zyfZcJ|mZRe-i0 dC$RE]EXe'yܣm1=u/;J?Lj~@q=˵ALr|a[[n`lh}FZzIl}fd üOWviy)8O|kxWBGۗ0bvTjcZؐs.Jþ| oSr{u52!=>Xr{ԟWhou:bQ!B_Wd6 9u?@ε лKu\JG $[wo'7 PAYzyoiW;U%fX^rE6JYjjJ&]"Ûie~N{JS)wkAqIًb/C䪋v5mיz%wKݼ>I!1T=4SN [k9ҁ!QyOD-ElHR^TcyiD%e=-NS:,㥦6BS9"kCYm{'t+ETjr:w@so !"G ix5p<~wf` d*:Y0Lm`ג9xx2K4F̺ed~0Q-?.@^:Y\F76sViKwlmV^j~|5|d`Ε uggK;Afӡ}O8Cw_4dj wCcj^zH7c;e'h6.N=x#GF\Gih֪n͗a<\[B|J!6T)uZЇ~"\$r팴 oѶ$,ўO.9?/n[7k~$'{n/SYPAətkrcye9&8k/^@gliU0p %U,Y^XEطj9,z  Pޒoڍ-RW]cS_P_q$!!B qXg2. >N,- cQUP .QI endstream endobj 205 0 obj <> stream xڛ0U0 yDG3 endstream endobj 207 0 obj <> stream x=TmLSgεj{5,jX`Y ?NiRм0tZD6a(P# 941~l%6r^y!٭89yޓyP4MI~Fc O@ (,XR`I1e<= 42 E=zA$aN*in \܋\n_ܒlg'vI.G!9y[9Ais$Ϝ!tɞ[=)Ϳ'O{>sIvwctH+ ]2ҿo!.ZB٨]Iq\KirWN+t~xSQx\<@ 8~n+fkvttΖ5ơx-PgZrȎ+*2uKhQ_Ÿy+$xi7]jo5 }E. .E]&H-Y<, L+}@_-@AT ʴ.2ɔ. p y*/MWKRڵ(*|zMSquYrdMX=rrS2Ѩ6fA`*Xmm0_7*yBđ O,!Eugk8Uo.P. 
x*(F8f0?D endstream endobj 208 0 obj <> stream xk`Q / endstream endobj 210 0 obj <> stream x=RmHSQ>w9O~lYAsB#郄#%Yif2]~_rt6 ?ca +,0QD`EE=}yx abjsH-,gk!lElMHN`dN%oQ˛4i*eV̮/~6!DʄDQMpEo4U^??(XNY+/|YN3W横+\rY Gd*\\NټΙjv23w,-'- f1WT*7)? /#ИLo||0"cHqIt{D@%#{B|LQbdcS"߶I/aP#rP}. |R$IYEHZlR:Ht%^5[L.PT#jGiN,˷_ `ЦXJ'Y:Xjt#ޤti AʤWZa-vSZ r ڇzh(uC].Q+/F*nڨZ/M~Û ~u ZeɃ';+)_HAtYOoIbz0 ( rE6*t1f=tl{CZ/ϯ=r' SxOxW^bFtq D9T endstream endobj 211 0 obj <> stream xk`  endstream endobj 14 0 obj <> stream x[ko_1*rޏ0 KU"N KmDAK+(E*$..Mږ[Aٙs$10dRB+f=Z@q0Asf_!!S$HB:. 2'@DEOh1D4NB 4X sp8,x@6*z*TZtZ:ԖXh49ct:X4NL`F@9/  0TT?(X{r#נ.&7z YNnCAK ƒ`HE㈩pq0(ƣG t3@&HCï\jtd PkH Pd%9B{@^xB"aY\K A5 ZX~ ִ¥XA١& 8CYi@*pIqlǡN6DKv0 z ",Ac)*%qy-๢& 3H 3hIhÓ \-:yKЇ!4˻d%OEj)…@((3*^\EVxQS@cɼx01s`a#HTf@RC4! fD6&3esF7 @F1- >@J$dC  |dB9E$(IVU 'WiitLdEv]0 {>.Astܔ y|v}Y.gòq?+OEz\ ?h^}|('lٿ||_M9M>пD`eլv(ٴ=p~@P8+$bh(<%> +-?&eBH/8T4i^-SֆlYY{ kmEYV^--˲x9[,^nBJL)ӷ-rv0V=9$Pbotν6[LR8`+D(4򅒮DTW}M~?{ *%(CHE/8%m_Z y]j,7BxK†%ju#+_ U"0GXg2Ymgfڎ =H>D=9/%PlxоI<GYz6L {u4K & vETǢP:H )NʈBM)?oiZʂ6Z#7ɂƴ sXIHJܝ)<׸fs ߏr9/{G74/|Qyx;xT7FQS+uþUU=oaC<&ZiEBIWD%${AeTB/dz^`[Xl<2q|Hξdzt.k )2ե1HSW=: +=:jhg5UۣSzt uVGb Уc1v,D -t-"_{7lw#MK3-jo'5Q"(褓p@)hFkvԙVY3[E٪^V7VYpSo6cѴIeNz P$I "VLN(NhOyO]\TnNJetY(աҊUr++w\"7%-3+@xN uf3:S;}zȦ7:y'r+zckaL&gBG9QX$ wD;ܴ?\W.֕MU7cدz_ _N`q2r]Vi@]d5[l+Ԏf+ jNgrf *\lS! 
f.Vi֛,V593ҒUjǮ] ĠM.H:lo[[>^9 6(,m^Yڣ2+DAVĞ&`} K,E&}Mȇxpa_r44qs [W%qUâkUތG/f77 3-g󣗣-xOgj60j4x\\l;u9?pǑ`GDlz6ZGgߡrű&!sGKPNO[Ha }uQ0Q ݆1R.]stYaz3Cit+jJW]5%d !jGsӆ+x ј8v{+C%"*>h1CxR!PQ2Њ Vz(x)C:BXb K p0j%<1ɄF 梛^Q0t\(ALƼtzxB F*|HzFx:aQ0<J n9}P1cP]O DdP8 "ܪbxu ԋDhF"[*> "(d4ɢBTF+ C3s6~'nc^k  V6 ]hhnk^QxhQֿޞտgtMy8iŲy7^,Nnf^g1]`/?aRti/gz>~S~>i 5rVҙR,Ko,Hd_7`HM+cE /0;Owdt~lQ>=tYixR'45MK(޸joצ?gnFcWiq5t%)H;f~Ok\&]$@ԗFH߉7Q_5l&RYF,ōƍ87gz5Ξ0ѤB 3H dHB а iP,2q\-@q@Fk@2ZwH: y8@^z}qQ#h)1KW]il';l<(`9iCEy=ڀ 9b$ á.F_\<;Ysɞ_42Ah;"Esޱ񧧾e.:N2nP#ZG95 j>4o_8qP)cTݾs¹:CĭCv#ݘ[LJ9Vn2| oV1^H[q,Zt-Huqƈ8d >Hn!B endstream endobj 212 0 obj <]/Root 1 0 R/Info 2 0 R/Size 213/W[1 3 2]/Filter/FlateDecode/Length 505>> stream x5YPq):rGH!eqEhAԱYBEH(ٗd3̸vøuq`̸3=39""a"҈0^ëxDТ;(*Dǰ?XSnk,6lŮa؀=+1R%(2\ALñ\p<. \ Wa&ft,mqnqb܇E< invburr \put(13,4.2){\vector(0,1){0.95}} % invburr -> invparalogis \put(11.7,3.1){\line(-1,-1){1}} \put(10.7,2.1){\line(-1,0){7.7}} \put(3,2.1){\vector(-1,-1){1.1}} % invburr -> llogis \put(13,3){\vector(0,-1){2}} % invburr -> invpareto \put(2.05,3.1){\vector(2,-1){4.2}} % burr -> pareto \put(1,3){\vector(0,-1){2}} % burr -> llogis \put(6,6){\vector(-2,-1){3.85}} % trbeta -> burr \put(1,4.2){\vector(0,1){0.95}} % burr -> paralogis \put(7,6){\vector(0,-1){1.8}} % trbeta -> genpareto \put(7,9){\vector(0,-1){1.8}} % fpareto -> trbeta \put(7,3){\vector(0,-1){2}} % genpareto -> pareto \put(8,3){\vector(2,-1){4}} % genpareto -> invpareto % \put(6,9){\vector(-2,-1){3.3}} % fpareto -> pareto3 % \put(8,9){\vector(2,-1){3.3}} % fpareto -> pareto1 \put(1,9){\vector(0,-1){1.1}} % pareto4 -> pareto3 \put(13,9){\vector(0,-1){1.1}} % pareto2 -> pareto1 \put(4.5,9.6){\vector(-1,0){1.75}} % fpareto -> pareto4 \put(9.5,9.6){\vector(1,0){1.75}} % fpareto -> pareto2 \put(14.7,9.6){\line(1,0){1.5}} % pareto2 -> pareto \put(16.2,9.6){\line(0,-1){10}} \put(16.2,-0.4){\line(-1,0){7.5}} \put(8.7,-0.4){\vector(-2,1){0.72}} \put(14.8,9.62){\makebox(0,0.5)[l]{$\mu = 0$}} \put(7,9.65){\makebox(0,0.5)[c]{Feller-Pareto}} 
\put(7,9.1){\makebox(0,0.5)[c]{$\mu, \alpha, \gamma, \tau, \theta$}} \put(7,9.6){\oval(5,1.2)} \put(3.2,9.65){\makebox(0,0.5)[l]{$\tau = 1$}} \put(1,9.65){\makebox(0,0.5)[c]{Pareto IV}} \put(1,9.1){\makebox(0,0.5)[c]{$\mu, \alpha, \gamma, \theta$}} \put(1,9.6){\oval(3.4,1.2)} \put(9.8,9.05){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(9.8,9.65){\makebox(0,0.5)[l]{$\tau = 1$}} \put(13,9.65){\makebox(0,0.5)[c]{Pareto II}} \put(13,9.1){\makebox(0,0.5)[c]{$\mu,\alpha, \theta$}} \put(13,9.6){\oval(3.4,1.2)} \put(0.8,8.3){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,7.35){\makebox(0,0.5)[c]{Pareto III}} \put(1,6.8){\makebox(0,0.5)[c]{$\mu, \gamma, \theta$}} \put(1,7.3){\oval(3.4,1.2)} \put(13.2,8.3){\makebox(0,0.5)[l]{$\mu = \theta$}} \put(13,7.35){\makebox(0,0.5)[c]{Pareto I}} \put(13,6.8){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(13,7.3){\oval(3.4,1.2)} \put(7.2,7.9){\makebox(0,0.5)[l]{$\mu = 0$}} \put(7,6.65){\makebox(0,0.5)[c]{Transformed beta}} \put(7,6.1){\makebox(0,0.5)[c]{$\alpha, \gamma, \tau, \theta$}} \put(7,6.6){\oval(5,1.2)} \put(9.2,5.4){\rotatebox{-26.6}{\makebox(0,0.5)[l]{$\alpha = 1$}}} \put(13.20,3.65){\makebox(0,0.5)[c]{Inverse Burr}} \put(13.20,3.1){\makebox(0,0.5)[c]{$\gamma, \tau, \theta$}} \put(13.20,3.6){\oval(3.4,1.2)} \put(13.2,4.3){\makebox(0,0.5)[l]{$\gamma = \tau$}} \put(13.20,5.80){\makebox(0,0.5)[c]{Inverse paralogistic}} \put(13.20,5.25){\makebox(0,0.5)[c]{$\tau, \theta$}} \put(13.20,5.75){\oval(5.4,1.2)} \put(13.2,1.9){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(13.20,0.45){\makebox(0,0.5)[c]{Inverse Pareto}} \put(13.20,-0.1){\makebox(0,0.5)[c]{$\tau, \theta$}} \put(13.20,0.4){\oval(3.9,1.2)} \put(7.2,4.9){\makebox(0,0.5)[l]{$\gamma = 1$}} \put(7,3.65){\makebox(0,0.5)[c]{Generalized Pareto}} \put(7,3.1){\makebox(0,0.5)[c]{$\alpha, \tau, \theta$}} \put(7,3.6){\oval(4.9,1.2)} \put(7.2,1.25){\makebox(0,0.5)[l]{$\tau = 1$}} \put(7,0.45){\makebox(0,0.5)[c]{Pareto}} \put(7,-0.1){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(7,0.4){\oval(2.2,1.2)} 
\put(4.5,5.4){\rotatebox{26.6}{\makebox(0,0.5)[r]{$\tau = 1$}}} \put(1,3.65){\makebox(0,0.5)[c]{Burr}} \put(1,3.1){\makebox(0,0.5)[c]{$\alpha, \gamma, \theta$}} \put(1,3.6){\oval(2.5,1.2)} \put(0.8,4.3){\makebox(0,0.5)[r]{$\gamma = \alpha$}} \put(1,5.80){\makebox(0,0.5)[c]{Paralogistic}} \put(1,5.25){\makebox(0,0.5)[c]{$\alpha, \theta$}} \put(1,5.75){\oval(3.4,1.2)} \put(0.8,1.9){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,0.45){\makebox(0,0.5)[c]{Loglogistic}} \put(1,-0.1){\makebox(0,0.5)[c]{$\gamma, \theta$}} \put(1,0.4){\oval(3.4,1.2)} \put(9.8,2.1){\rotatebox{-26.6}{\makebox(0,0.5)[r]{$\alpha = 1$}}} \put(4.0,2.1){\rotatebox{-26.6}{\makebox(0,0.5)[r]{$\gamma = 1$}}} \put(11.25,3.0){\rotatebox{45}{\makebox(0,0.5)[r]{$\tau = 1$}}} \end{picture} \caption{Interrelations between distributions of the Feller--Pareto family. This diagram is an extension of Figure~5.2 of \citet{LossModels4e}.} \label{fig:diagram:fp-family} \end{figure} \begin{figure} \setlength{\unitlength}{0.7cm} \begin{picture}(7.5,5.2)(-0.25,0) \small % Flèches \put(4,4){\vector(2,-1){1.55}} % trgamma -> weibull \put(5.55,2){\vector(-2,-1){1.55}} % weibull -> exp \put(1.55,2){\vector(2,-1){1.55}} % gamma -> exp \put(3,4){\vector(-2,-1){1.55}} % trgamma -> gamma \put(3.5,4.65){\makebox(0,0.5)[c]{Transformed gamma}} \put(3.5,4.1){\makebox(0,0.5)[c]{$\alpha, \tau, \lambda$}} \put(3.5,4.6){\oval(5.5,1.2)} \put(5.4,3.45){\makebox(0,0.5)[l]{$\alpha = 1$}} \put(6,2.65){\makebox(0,0.5)[c]{Weibull}} \put(6,2.1){\makebox(0,0.5)[c]{$\tau, \lambda$}} \put(6,2.6){\oval(2.5,1.2)} \put(5.4,1.35){\makebox(0,0.5)[l]{$\tau = 1$}} \put(3.5,0.65){\makebox(0,0.5)[c]{Exponential}} \put(3.5,0.1){\makebox(0,0.5)[c]{$\lambda$}} \put(3.5,0.6){\oval(3.5,1.2)} \put(1.6,1.35){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,2.65){\makebox(0,0.5)[c]{Gamma}} \put(1,2.1){\makebox(0,0.5)[c]{$\alpha, \lambda$}} \put(1,2.6){\oval(2.5,1.2)} \put(1.6,3.45){\makebox(0,0.5)[r]{$\tau = 1$}} \end{picture} \hfill \begin{picture}(8.75,5.2)(-0.875,0) 
\small % Flèches \put(4,4){\vector(2,-1){1.55}} % trgamma -> weibull \put(5.55,2){\vector(-2,-1){1.55}} % weibull -> exp \put(1.55,2){\vector(2,-1){1.55}} % gamma -> exp \put(3,4){\vector(-2,-1){1.55}} % trgamma -> gamma \put(3.5,4.65){\makebox(0,0.5)[c]{Inverse transformed gamma}} \put(3.5,4.1){\makebox(0,0.5)[c]{$\alpha, \tau, \lambda$}} \put(3.5,4.6){\oval(7,1.2)} \put(5.4,3.45){\makebox(0,0.5)[l]{$\alpha = 1$}} \put(6,2.65){\makebox(0,0.5)[c]{Inverse Weibull}} \put(6,2.1){\makebox(0,0.5)[c]{$\tau, \lambda$}} \put(6,2.6){\oval(4,1.2)} \put(5.4,1.35){\makebox(0,0.5)[l]{$\tau = 1$}} \put(3.5,0.65){\makebox(0,0.5)[c]{Inverse exponential}} \put(3.5,0.1){\makebox(0,0.5)[c]{$\lambda$}} \put(3.5,0.6){\oval(5,1.2)} \put(1.6,1.35){\makebox(0,0.5)[r]{$\alpha = 1$}} \put(1,2.65){\makebox(0,0.5)[c]{Inverse gamma}} \put(1,2.1){\makebox(0,0.5)[c]{$\alpha, \lambda$}} \put(1,2.6){\oval(4,1.2)} \put(1.6,3.45){\makebox(0,0.5)[r]{$\tau = 1$}} \end{picture} \caption{Interrelations between distributions of the transformed gamma and inverse transformed gamma families. Diagrams derived from Figure~5.3 of \citet{LossModels4e}.} \label{fig:diagram:trgamma-family} \end{figure} In addition to the \code{d}, \code{p}, \code{q} and \code{r} functions, \pkg{actuar} introduces \code{m}, \code{lev} and \code{mgf} functions to compute, respectively, the theoretical raw moments \begin{equation*} m_k = \E{X^k}, \end{equation*} the theoretical limited moments \begin{equation*} \E{(X \wedge x)^k} = \E{\min(X, x)^k} \end{equation*} and the moment generating function \begin{equation*} M_X(t) = \E{e^{tX}}, \end{equation*} when it exists. Every distribution of \autoref{tab:continuous} is supported, along with the following distributions of base R: beta, exponential, chi-square, gamma, lognormal, normal (no \code{lev}), uniform and Weibull. 
The \code{m} and \code{lev} functions are especially useful for estimation methods based on the matching of raw or limited moments; see the \code{lossdist} vignette for their empirical counterparts. The \code{mgf} functions come in handy to compute the adjustment coefficient in ruin theory; see the \code{risk} vignette. \section{Phase-type distributions} \label{sec:phase-type} In addition to the 19 distributions of \autoref{tab:continuous}, the package provides support for a family of distributions deserving a separate presentation. Phase-type distributions \citep{Neuts_81} are defined as the distribution of the time until absorption of continuous time, finite state Markov processes with $m$ transient states and one absorbing state. Let \begin{equation} \label{eq:Markov-transition-matrix} \mat{Q} = \begin{bmatrix} \mat{T} & \mat{t} \\ \mat{0} & 0 \end{bmatrix} \end{equation} be the transition rates matrix (or intensity matrix) of such a process and let $(\mat{\pi}, \pi_{m + 1})$ be the initial probability vector. Here, $\mat{T}$ is an $m \times m$ non-singular matrix with $t_{ii} < 0$ for $i = 1, \dots, m$ and $t_{ij} \geq 0$ for $i \neq j$, $\mat{t} = - \mat{T} \mat{e}$ and $\mat{e}$ is a column vector with all components equal to 1. Then the cdf of the time until absorption random variable with parameters $\mat{\pi}$ and $\mat{T}$ is \begin{equation} \label{eq:cdf-phtype} F(x) = \begin{cases} \pi_{m + 1}, & x = 0, \\ 1 - \mat{\pi} e^{\mat{T} x} \mat{e}, & x > 0, \end{cases} \end{equation} where \begin{equation} \label{eq:matrix-exponential} e^{\mat{M}} = \sum_{n = 0}^\infty \frac{\mat{M}^n}{n!} \end{equation} is the matrix exponential of matrix $\mat{M}$. The exponential distribution, the Erlang (gamma with integer shape parameter) and discrete mixtures thereof are common special cases of phase-type distributions. The package provides \code{d}, \code{p}, \code{r}, \code{m} and \code{mgf} functions for phase-type distributions. 
The root is \code{phtype} and parameters $\mat{\pi}$ and $\mat{T}$ are named \code{prob} and \code{rates}, respectively; see also \autoref{app:phase-type}. For the package, function \code{pphtype} is central to the evaluation of the ruin probabilities; see \code{?ruin} and the \code{risk} vignette. \section{Extensions to standard discrete distributions} \label{sec:discrete} The package introduces support functions for additional counting distributions commonly used in loss frequency modeling. A counting distribution is a discrete distribution defined on the non-negative integers $0, 1, 2, \dots$. Let $N$ be the counting random variable. We denote $p_k$ the probability that the random variable $N$ takes the value $k$, that is: \begin{equation*} p_k = \Pr[N = k]. \end{equation*} \citet{LossModels4e} classify counting distributions in two main classes. First, a discrete random variable is a member of the $(a, b, 0)$ class of distributions if there exists constants $a$ and $b$ such that \begin{equation*} \frac{p_k}{p_{k - 1}} = a + \frac{b}{k}, \quad k = 1, 2, \dots. \end{equation*} The probability at zero, $p_0$, is set such that $\sum_{k = 0}^\infty p_k = 1$. The members of this class are the Poisson, the binomial, the negative binomial and its special case, the geometric. These distributions are all well supported in base R with \code{d}, \code{p}, \code{q} and \code{r} functions. The second class of distributions is the $(a, b, 1)$ class. A discrete random variable is a member of the $(a, b, 1)$ class of distributions if there exists constants $a$ and $b$ such that \begin{equation*} \frac{p_k}{p_{k - 1}} = a + \frac{b}{k}, \quad k = 2, 3, \dots. \end{equation*} One will note that recursion starts at $k = 2$ for the $(a, b, 1)$ class. Therefore, the probability at zero can be any arbitrary number $0 \leq p_0 \leq 1$. Setting $p_0 = 0$ defines a subclass of so-called \emph{zero-truncated} distributions. 
The members of this subclass are the zero-truncated Poisson, the zero-truncated binomial, the zero-truncated negative binomial and the zero-truncated geometric. Let $p_k^T$ denote the probability mass in $k$ for a zero-truncated distribution. As above, $p_k$ denotes the probability mass for the corresponding member of the $(a, b, 0)$ class. We have \begin{equation*} p_k^T = \begin{cases} 0, & k = 0 \\ \displaystyle\frac{p_k}{1 - p_0}, & k = 1, 2, \dots. \end{cases} \end{equation*} Moreover, let $P(k)$ denotes the cumulative distribution function of a member of the $(a, b, 0)$ class. Then the cdf $P^T(k)$ of the corresponding zero-truncated distribution is \begin{equation*} P^T(k) = \frac{P(k) - P(0)}{1 - P(0)} = \frac{P(k) - p_0}{1 - p_0} \end{equation*} for all $k = 0, 1, 2, \dots$. Alternatively, the survival function $\bar{P}^T(k) = 1 - P^T(k)$ is \begin{equation*} \bar{P}^T(k) = \frac{\bar{P}(k)}{\bar{P}(0)} = \frac{\bar{P}(k)}{1 - p_0}. \end{equation*} Finally, let $\kappa_n$ denote the $n$\textsuperscript{th} cumulant of a distribution. The first three cumulants of a zero-truncated distribution are \begin{equation} \label{eq:cumulants} \begin{split} \kappa_1^T &= c \kappa_1 \\ \kappa_2^T &= c \kappa_2 + (1 - c) c \kappa_1^2 \\ \kappa_3^T &= c m_3 - 3 c^2 m_1 m_2 + 2 c^3 m_1^3, \end{split} \end{equation} where $m_k$ is the $k$\textsuperscript{th} raw moment as defined in \autoref{sec:continuous}, and with \begin{equation*} c = \frac{1}{1 - p_0}. \end{equation*} Package \pkg{actuar} provides \code{d}, \code{p}, \code{q} and \code{r} functions for the all the zero-truncated distributions mentioned above. \autoref{tab:discrete} lists the root names of the functions; see \autoref{app:discrete} for additional details. 
\begin{table} \centering \begin{tabular}{ll} \toprule Distribution & Root \\ \midrule Zero-truncated Poisson & \code{ztpois} \\ Zero-truncated binomial & \code{ztbinom} \\ Zero-truncated negative binomial & \code{ztnbinom} \\ Zero-truncated geometric & \code{ztgeom} \\ Logarithmic & \code{logarithmic} \\ \addlinespace[6pt] Zero-modified Poisson & \code{zmpois} \\ Zero-modified binomial & \code{zmbinom} \\ Zero-modified negative binomial & \code{zmnbinom} \\ Zero-modified geometric & \code{zmgeom} \\ Zero-modified logarithmic & \code{zmlogarithmic} \\ \bottomrule \end{tabular} \caption{Members of the $(a, b, 1)$ class of discrete distributions supported by \pkg{actuar} and root names of the R functions.} \label{tab:discrete} \end{table} An entry of \autoref*{tab:discrete} deserves a few additional words. The logarithmic (or log-series) distribution with parameter $\theta$ has pmf \begin{equation*} p_k = \frac{a \theta^x}{k}, \quad k = 1, 2, \dots, \end{equation*} with $a = -1/\log(1 - \theta)$ and for $0 \leq \theta < 1$. This is the standard parametrization in the literature \citep{Johnson:discrete:2005}. The logarithmic distribution is always defined on the strictly positive integers. As such, it is not qualified as ``zero-truncated'', but it nevertheless belongs to the $(a, b, 1)$ class of distributions, more specifically to the subclass with $p_0 = 0$. Actually, the logarithmic distribution is the limiting case of the zero-truncated negative binomial distribution with size parameter equal to zero and $\theta = 1 - p$, where $p$ is the probability of success for the zero-truncated negative binomial. Note that this differs from the presentation in \citet{LossModels4e}. Another subclass of the $(a, b, 1)$ class of distributions is obtained by setting $p_0$ to some arbitrary number $p_0^M$ subject to $0 < p_0^M \leq 1$. The members of this subclass are called \emph{zero-modified} distributions. 
Zero-modified distributions are discrete mixtures between a degenerate distribution at zero and the corresponding distribution from the $(a, b, 0)$ class. Let $p_k^M$ and $P^M(k)$ denote the pmf and cdf of a zero-modified distribution. Written as a mixture, the pmf is \begin{equation} \label{eq:mixture} p_k^M = \left(1 - \frac{1 - p_0^M}{1 - p_0} \right) \mathbb{1}_{\{k = 0\}} + \frac{1 - p_0^M}{1 - p_0}\, p_k. \end{equation} Alternatively, we have \begin{equation*} p_k^M = \begin{cases} p_0^M, & k = 0 \\ \displaystyle\frac{1 - p_0^M}{1 - p_0}\, p_k, & k = 1, 2, \dots \end{cases} \end{equation*} and \begin{align*} P^M(k) &= p_0^M + (1 - p_0^M) \frac{P(k) - P(0)}{1 - P(0)} \\ &= p_0^M + \frac{1 - p_0^M}{1 - p_0}\, (P(k) - p_0) \\ &= p_0^M + (1 - p_0^M)\, P^T(k) \end{align*} for all $k = 0, 1, 2, \dots$. The survival function is \begin{equation*} \bar{P}^M(k) = (1 - p_0^M)\, \frac{\bar{P}(k)}{\bar{P}(0)} = \frac{1 - p_0^M}{1 - p_0}\, \bar{P}(k) = (1 - p_0^M)\, \bar{P}^T(k). \end{equation*} Therefore, we can also write the pmf of a zero-modified distribution as a mixture of a degenerate distribution at zero and the corresponding zero-truncated distribution: \begin{equation} \label{eq:mixture:alt} p_k^M = p_0^M \mathbb{1}_{\{k = 0\}} + (1 - p_0^M)\, p_k^T. \end{equation} The first three cumulants $\kappa_1^M$, $\kappa_2^M$, $\kappa_3^M$ of a zero-modified distribution are given by the corresponding right hand sides of \eqref{eq:cumulants} with \begin{equation} \label{eq:c-in-cumulants-zm} c = \frac{1 - p_0^M}{1 - p_0}. \end{equation} The members of the subclass are the zero-modified Poisson, zero-modified binomial, zero-modified negative binomial and zero-modified geometric, together with the zero-modified logarithmic as a limiting case of the zero-modified negative binomial. \autoref{tab:discrete} lists the root names of the support functions provided in \pkg{actuar}; see also \autoref{app:discrete}. 
Clearly, zero-truncated distributions are zero-modified distributions with $p_0^M = 0$. However, using the dedicated functions in R will be more efficient. \section{Poisson-inverse Gaussian distribution} \label{sec:pig} The Poisson-inverse Gaussian (PIG) distribution results from the continuous mixture between a Poisson distribution and an inverse Gaussian. That is, the Poisson-inverse Gaussian is the (marginal) distribution of the random variable $X$ when the conditional random variable $X|\Lambda = \lambda$ is Poisson with parameter $\lambda$ and the random variable $\Lambda$ is inverse Gaussian with parameters $\mu$ and $\phi$. The literature proposes many different expressions for the pmf of the PIG \citep{Holla:PIG:1966,Shaban:PIG:1981,Johnson:discrete:2005,LossModels4e}. Using the parametrization for the inverse Gaussian found in \autoref{app:continuous}, we have: \begin{equation} \label{eq:pig:px} \begin{split} p_x &= \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi \left( 1 + \frac{1}{2\phi\mu^2} \right)} \right)^{-\left( x - \frac{1}{2} \right)} \\ &\phantom{=} \times K_{x - \frac{1}{2}} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right), \end{split} \end{equation} for $x = 0, 1, \dots$, $\mu > 0$, $\phi > 0$ and where \begin{equation} \label{eq:bessel_k} K_\nu(ax) = \frac{a^{-\nu}}{2} \int_0^\infty t^{\nu - 1} e^{- z(t + at^{-1})/2} dt, \quad a^2 z > 0 \end{equation} is the modified Bessel function of the third kind \citep{Bateman:1953:2,Abramowitz:1972}. 
One may compute the probabilities $p_x$, $x = 0, 1, \dots$ recursively using the following equations: \begin{equation} \label{eq:pig:px:recursive} \begin{split} p_0 &= \exp\left\{ \frac{1}{\phi\mu} \left(1 - \sqrt{1 + 2\phi\mu^2}\right) \right\} \\ p_1 &= \frac{\mu}{\sqrt{1 + 2\phi\mu^2}}\, p_0 \\ p_x &= \frac{2\phi\mu^2}{1 + 2\phi\mu^2} \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{\mu^2}{1 + 2\phi\mu^2} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{split} \end{equation} The first moment of the distribution is $\mu$. The second and third central moment are, respectively, \begin{align*} \mu_2 &= \sigma^2 = \mu + \phi\mu^3 \\ \mu_3 &= \mu + 3 \phi \mu^2 \sigma^2. \end{align*} For the limiting case $\mu = \infty$, the underlying inverse Gaussian has an inverse chi-squared distribution. The latter has no finite strictly positive, integer moments and, consequently, neither does the Poisson-inverse Gaussian. See \autoref{app:discrete:pig} for the formulas in this case. \section{Special integrals} \label{sec:special-integrals} Many of the cumulative distribution functions of \autoref{app:continuous} are expressed in terms of the incomplete gamma function or the incomplete beta function. From a probability theory perspective, the incomplete gamma function is usually defined as \begin{equation} \label{eq:pgamma} \Gamma(\alpha; x) = \frac{1}{\Gamma(\alpha)} \int_0^x t^{\alpha - 1} e^{-t}\, dt, \quad \alpha > 0, x > 0, \end{equation} with \begin{equation*} \Gamma(\alpha) = \int_0^\infty t^{\alpha - 1} e^{-t}\, dt, \end{equation*} whereas the (regularized) incomplete beta function is defined as \begin{equation} \label{eq:pbeta} \beta(a, b; x) = \frac{1}{\beta(a, b)} \int\limits_0^x t^{a - 1} (1 - t)^{b - 1}\, dt, \quad a > 0, b > 0, 0 < x < 1, \end{equation} with \begin{equation*} \beta(a, b) = \int_0^1 t^{a - 1} (1 - t)^{b - 1}\, dt = \frac{\Gamma(a) \Gamma(b)}{\Gamma(a + b)}. 
\end{equation*} Now, there exist alternative definitions of the these functions that are valid for negative values of the parameters. \citet{LossModels4e} introduce them to extend the range of admissible values for limited expected value functions. First, following \citet[Section~6.5]{Abramowitz:1972}, we define the ``extended'' incomplete gamma function as \begin{equation} \label{eq:gammainc} G(\alpha; x) = \int_x^\infty t^{\alpha - 1} e^{-t}\, dt \end{equation} for $\alpha$ real and $x > 0$. When $\alpha > 0$, we clearly have \begin{equation} \label{eq:gammainc:apos} G(\alpha; x) = \Gamma(a) [1 - \Gamma(\alpha; x)]. \end{equation} The integral is also defined for $\alpha \le 0$. As outlined in \citet[Appendix~A]{LossModels4e}, integration by parts of \eqref{eq:gammainc} yields the relation \begin{equation*} G(\alpha; x) = -\frac{x^\alpha e^{-x}}{\alpha} + \frac{1}{\alpha} G(\alpha + 1; x). \end{equation*} This process can be repeated until $\alpha + k$ is a positive number, in which case the right hand side can be evaluated with \eqref{eq:gammainc:apos}. If $\alpha = 0, -1, -2, \dots$, this calculation requires the value of \begin{equation*} \label{eq:expint} G(0; x) = \int_x^\infty \frac{e^{-t}}{t}\, dt = E_1(x), \end{equation*} which is known in the literature as the \emph{exponential integral} \citep[Section~5.1]{Abramowitz:1972}. Second, as seen in \citet[Section~6.6]{Abramowitz:1972}, we have the following relation for the integral on the right hand side of \eqref{eq:pbeta}: \begin{equation*} \int\limits_0^x t^{a - 1} (1 - t)^{b - 1}\, dt = \frac{x^a}{a}\, F(a, 1 - b; a + 1; x), \end{equation*} where \begin{equation*} F(a, b; c; z) = \frac{\Gamma(c)}{\Gamma(a) \Gamma(b)} \sum_{k = 0}^\infty \frac{\Gamma(a + k) \Gamma(b + k)}{\Gamma(c + k)} \frac{z^k}{k!} \end{equation*} is the Gauss hypergeometric series. With the above definition, the incomplete beta function also admits negative, non integer values for parameters $a$ and $b$. 
Now, let \begin{equation} \label{eq:betaint} B(a, b; x) = \Gamma(a + b) \int_0^x t^{a-1} (1-t)^{b-1} dt \end{equation} for $a > 0$, $b \neq -1, -2, \dots$ and $0 < x < 1$. Again, it is clear that when $b > 0$, \begin{equation*} B(a, b; x) = \Gamma(a) \Gamma(b) \beta(a, b; x). \end{equation*} Of more interest here is the case where $b < 0$, $b \neq -1, -2, \dots$ and $a > 1 + \lfloor -b\rfloor$. Integration by parts of \eqref{eq:betaint} yields \begin{equation} \label{eq:betaint:2} \begin{split} B(a, b; x) &= \displaystyle -\Gamma(a + b) \left[ \frac{x^{a-1} (1-x)^b}{b} + \frac{(a-1) x^{a-2} (1-x)^{b+1}}{b (b+1)} \right. \\ &\phantom{=} \displaystyle\left. + \cdots + \frac{(a-1) \cdots (a-r) x^{a-r-1} (1-x)^{b+r}}{b (b+1) \cdots (b+r)} \right] \\ &\phantom{=} \displaystyle + \frac{(a-1) \cdots (a-r-1)}{b (b+1) \cdots (b+r)} \Gamma(a-r-1) \\ &\phantom{=} \times \Gamma(b+r+1) \beta(a-r-1, b+r+1; x), \end{split} \end{equation} where $r = \lfloor -b\rfloor$. For the needs of \pkg{actuar}, we dubbed \eqref{eq:betaint} the \emph{beta integral}. Package \pkg{actuar} includes a C implementation of \eqref{eq:betaint:2} and imports functionalities of package \pkg{expint} \citep{expint} to compute the incomplete gamma function \eqref{eq:gammainc} at the C level. The routines are used to evaluate the limited expected value for distributions of the Feller--Pareto and transformed gamma families. \section{Package API: accessing the C routines} \label{sec:api} The actual workhorses behind the R functions presented in this document are C routines that the package exposes to other packages through an API. The header file \file{include/actuarAPI.h} in the package installation directory contains declarations for % the continuous distributions of \autoref{app:continuous}, % the phase-type distributions of \autoref{app:phase-type}, % the discrete distributions of \autoref{app:discrete}, % and the beta integral of \autoref{sec:special-integrals}. 
The prototypes of the C routines for probability distributions all follow the same pattern modeled after those of base R \citep[Chapter~6]{R-exts}. As an example, here are the prototypes for the Pareto distribution: \begin{Schunk} \begin{Sinput} double dpareto(double x, double shape, double scale, int give_log); double ppareto(double q, double shape, double scale, int lower_tail, int log_p); double qpareto(double p, double shape, double scale, int lower_tail, int log_p); double rpareto(double shape, double scale); double mpareto(double order, double shape, double scale, int give_log); double levpareto(double limit, double shape, double scale, double order, int give_log); \end{Sinput} \end{Schunk} For the beta integral \eqref{eq:betaint:2}, the frontend is a routine \code{betaint} that returns \code{NA} or \code{NaN} for out-of-range arguments, but actual computation is done by routine \code{betaint\_raw}. Both are exposed as follows in the API: \begin{Schunk} \begin{Sinput} double betaint(double x, double a, double b); double betaint_raw(double x, double a, double b, double x1m); \end{Sinput} \end{Schunk} The developer of some package \pkg{pkg} who wants to use a routine --- say \code{dpareto} --- in her code should proceed as follows. \begin{enumerate} \item Add \pkg{actuar} to the \code{Imports} and \code{LinkingTo} directives of the \file{DESCRIPTION} file of \pkg{pkg}; \item Add an entry \code{import(actuar)} in the \file{NAMESPACE} file of \pkg{pkg}; \item Define the routine with a call to \code{R\_GetCCallable} in the initialization routine \code{R\_init\_pkg} of \pkg{pkg} \citep[Section~5.4]{R-exts}. 
For the current example, the file \file{src/init.c} of \pkg{pkg} would contain the following code: \begin{Schunk} \begin{Sinput} void R_init_pkg(DllInfo *dll) { R_registerRoutines( /* native routine registration */ ); pkg_dpareto = (double(*)(double,int,int)) R_GetCCallable("actuar", "dpareto"); } \end{Sinput} \end{Schunk} \item Define a native routine interface that will call \code{dpareto}, say \code{pkg\_dpareto} to avoid any name clash, in \file{src/init.c} as follows: \begin{Schunk} \begin{Sinput} double(*pkg_dpareto)(double,double,double,int); \end{Sinput} \end{Schunk} \item Declare the routine in a header file of \pkg{pkg} with the keyword \code{extern} to expose the interface to all routines of the package. In our example, file \file{src/pkg.h} would contain: \begin{Schunk} \begin{Sinput} extern double(*pkg_dpareto)(double,double,double,int); \end{Sinput} \end{Schunk} \item Include the package header file \file{pkg.h} in any C file making use of routine \code{pkg\_dpareto}. \end{enumerate} The companion package \pkg{expint} \citep{expint} ships with a complete test package implementing the above. See the vignette of the latter package for more information. \section{Implementation details} \label{sec:implementation} The cdf of the continuous distributions of \autoref{tab:continuous} use \code{pbeta} and \code{pgamma} to compute the incomplete beta and incomplete gamma functions, respectively. Functions \code{dinvgauss}, \code{pinvgauss} and \code{qinvgauss} rely on C implementations of functions of the same name from package \pkg{statmod} \citep{statmod}. The matrix exponential C routine needed in \code{dphtype} and \code{pphtype} is based on \code{expm} from package \pkg{Matrix} \citep{Matrix}. The C code to compute the beta integral \eqref{eq:betaint:2} was written by the second author. 
For all but the trivial input values, the pmf, cdf and quantile functions for the zero-truncated and zero-modified distributions of \autoref{tab:discrete} use the internal R functions for the corresponding standard distribution. Generation of random variates from zero-truncated distributions uses the following simple inversion algorithm on a restricted range \citep{Dalgaard:r-help:2005,Thomopoulos:2013:simulation}. Let $u$ be a random number from a uniform distribution on $(p_0, 1)$. Then $x = P^{-1}(u)$ is distributed according to the zero-truncated version of the distribution with cdf $P(k)$. For zero-modified distributions, we generate variates from the discrete mixture \eqref{eq:mixture} when $p_0^M \geq p_0$. When $p_0^M < p_0$, we can use either of two methods: \begin{enumerate} \item the classical rejection method with an envelope that differs from the target distribution only at zero (meaning that only zeros are rejected); \item generation from the discrete mixture \eqref{eq:mixture:alt} with the corresponding zero-truncated distribution (hence using the inversion method on a restricted range explained above). \end{enumerate} Which approach is faster depends on the relative speeds of the standard random generation function and the standard quantile function, and also on the proportion of zeros that are rejected using the rejection algorithm. Based on the difference $p_0 - p_0^M$, we determined (empirically) distribution-specific cutoff points between the two methods. Finally, computation of the Poisson-inverse Gaussian pmf uses the recursive equations \eqref{eq:pig:px:recursive}. Versions of \pkg{actuar} prior to 3.0-0 used the direct expression \eqref{eq:pig:px} and the C level function \code{bessel\_k} part of the R API. 
However, the latter overflows for large values of $\nu$ and this caused \code{NaN} results for the value of \begin{equation*} \frac{B^{- \left(x - \frac{1}{2} \right)} K_{x - \frac{1}{2}}(B/\phi)}{x!} \end{equation*} and, therefore, for the Poisson-inverse Gaussian pmf. \appendix \section{Continuous distributions} \label{app:continuous} This appendix gives the root name and the parameters of the R support functions for the distributions of \autoref{tab:continuous}, as well as the formulas for the pdf, the cdf, the raw moment of order $k$ and the limited moment of order $k$ using the parametrization of \citet{LossModels4e} and \citet{HoggKlugman}. In the following, $\Gamma(\alpha; x)$ is the incomplete gamma function \eqref{eq:pgamma}, $\beta(a, b; x)$ is the incomplete beta function \eqref{eq:pbeta}, $G(\alpha; x)$ is the ``extended'' incomplete gamma function \eqref{eq:gammainc}, $B(a, b; x)$ is the beta integral \eqref{eq:betaint} and $K_\nu(x)$ is the modified Bessel function of the third kind \eqref{eq:bessel_k}. Unless otherwise stated, all parameters are finite and strictly positive, and the functions are defined for $x > 0$. 
\subsection{Feller--Pareto family} \label{app:continuous:feller-pareto} \subsubsection{Feller--Pareto} \begin{itemize} \item Root: \code{fpareto} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u^\tau (1 - u)^\alpha}{% (x - \mu) \beta (\alpha, \tau )}, \quad u = \frac{v}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(\tau+j/\gamma) \Gamma(\alpha-j/\gamma)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad \text{integer } 0 \leq k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(\tau+j/\gamma, \alpha-j/\gamma; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad \text{integer } k \geq 0, \quad \alpha - j/\gamma \neq -1, -2, \dots \end{align*} \subsubsection{Pareto IV} \begin{itemize} \item Root: \code{pareto4} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \gamma u^\alpha (1 - u)}{(x - \mu)}, \quad u = \frac{1}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(1+j/\gamma) \Gamma(\alpha-j/\gamma)}{% \Gamma(\alpha)}, \quad \text{integer } 0 \leq k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(1+j/\gamma, \alpha-j/\gamma; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad \text{integer } k \geq 0 \quad \alpha - j/\gamma \neq -1, -2, \dots \end{align*} 
\subsubsection{Pareto III} \begin{itemize} \item Root: \code{pareto3} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u (1 - u)}{(x - \mu)}, \quad u = \frac{v}{1 + v}, \quad v = \left(\frac{x - \mu}{\theta} \right)^\gamma, \quad x > \mu \\ F(x) &= u \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \Gamma(1+j/\gamma) \Gamma(1-j/\gamma), \quad \text{integer } 0 \leq k < \gamma \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, B(1+j/\gamma, 1-j/\gamma; u) \\ &\phantom{=} + x^k (1 - u), \quad \text{integer } k \geq 0 \quad 1 - j/\gamma \neq -1, -2, \dots \end{align*} \subsubsection{Pareto II} \begin{itemize} \item Root: \code{pareto2} \item Parameters: \code{min} ($-\infty < \mu < \infty$), \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha u^\alpha (1 - u)}{(x - \mu)}, \quad u = \frac{1}{1 + v}, \quad v = \frac{x - \mu}{\theta}, \quad x > \mu \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{\Gamma(1+j) \Gamma(\alpha-j)}{% \Gamma(\alpha)}, \quad \text{integer } 0 \leq k < \alpha \\ \E{(X \wedge x)^k} &= \sum_{j = 0}^k \binom{k}{j} \mu^{k - j} \theta^j\, \frac{B(1+j, \alpha-j; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad \text{integer } k \geq 0 \quad \alpha - j \neq -1, -2, \dots \end{align*} \subsubsection{Transformed beta} \begin{itemize} \item Root: \code{trbeta}, \code{pearson6} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u^\tau (1 - u)^\alpha}{% x \beta (\alpha, \tau )}, \qquad u = \frac{v}{1 + v}, \qquad v = \left(\frac{x}{\theta} \right)^\gamma 
\\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\gamma) \Gamma(\alpha-k/\gamma)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad -\tau\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k/\gamma, \alpha-k/\gamma; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad k > -\tau\gamma \end{align*} \subsubsection{Burr} \begin{itemize} \item Root: \code{burr} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \gamma u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k/\gamma) \Gamma(\alpha-k/\gamma)}{% \Gamma(\alpha)}, \quad -\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k/\gamma, \alpha-k/\gamma; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -\gamma \end{align*} \subsubsection{Loglogistic} \begin{itemize} \item Root: \code{llogis} \item Parameters: \code{shape} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\gamma u (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= u \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1+k/\gamma) \Gamma(1-k/\gamma), \quad -\gamma < k < \gamma \\ \E{(X \wedge x)^k} &= \theta^k B(1+k/\gamma, 1-k/\gamma; u) \\ &\phantom{=} + x^k (1 - u), \quad k > -\gamma \end{align*} \subsubsection{Paralogistic} \begin{itemize} \item Root: \code{paralogis} \item Parameters: \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha^2 u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\alpha \\ F(x) &= 1 - u^\alpha \\ 
\displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k/\alpha) \Gamma(\alpha-k/\alpha)}{% \Gamma(\alpha)}, \quad -\alpha < k < \alpha^2 \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k/\alpha, \alpha-k/\alpha; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -\alpha \end{align*} \subsubsection{Generalized Pareto} \begin{itemize} \item Root: \code{genpareto} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u^\tau (1 - u)^\alpha}{x \beta (\alpha, \tau )}, \qquad u = \frac{v}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= \beta(\tau, \alpha; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k) \Gamma(\alpha-k)}{% \Gamma(\alpha) \Gamma(\tau)}, \quad -\tau < k < \alpha \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k, \alpha-k; u)}{% \Gamma(\alpha) \Gamma(\tau)} \\ &\phantom{=} + x^k [1 - \beta(\tau, \alpha; u)], \quad k > -\tau \end{align*} \subsubsection{Pareto} \begin{itemize} \item Root: \code{pareto} \item Parameters: \code{shape} ($\alpha$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha u^\alpha (1 - u)}{x}, \qquad u = \frac{1}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= 1 - u^\alpha \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(1+k) \Gamma(\alpha-k)}{% \Gamma(\alpha)}, \quad -1 < k < \alpha \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(1+k, \alpha-k; 1-u)}{% \Gamma(\alpha)} \\ &\phantom{=} + x^k u^\alpha, \quad k > -1 \end{align*} \subsubsection{Single-parameter Pareto (Pareto I)} \begin{itemize} \item Root: \code{pareto1} \item Parameters: \code{shape} ($\alpha$), \code{min} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\alpha \theta^\alpha}{x^{\alpha+1}}, \quad x > \theta \\ F(x) &= 1 - \left( \frac{\theta}{x} \right)^\alpha, \quad x > \theta \\ \displaybreak[0] \E{X^k} &= \frac{\alpha \theta^k}{\alpha - k}, \quad k < \alpha \\ \E{(X \wedge x)^k} &= \frac{\alpha 
\theta^k}{\alpha - k} - \frac{k \theta^\alpha}{(\alpha - k) x^{\alpha-k}}, \quad x \geq \theta \end{align*} Although there appears to be two parameters, only $\alpha$ is a true parameter. The value of $\theta$ is the minimum of the distribution and is usually set in advance. \subsubsection{Inverse Burr} \begin{itemize} \item Root: \code{invburr} \item Parameters: \code{shape1} ($\tau$), \code{shape2} ($\gamma$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau \gamma u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left( \frac{x}{\theta} \right)^\gamma \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\gamma) \Gamma(1-k/\gamma)}{% \Gamma(\tau)}, \quad -\gamma < k < \alpha\gamma \\ \E{(X \wedge x)^k} &= \frac{% \theta^k B(\tau+k/\gamma, 1-k/\gamma; u)}{% \Gamma(\tau)} \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau\gamma \end{align*} \subsubsection{Inverse Pareto} \begin{itemize} \item Root: \code{invpareto} \item Parameters: \code{shape} ($\tau$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \frac{x}{\theta} \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k) \Gamma(1-k)}{% \Gamma(\tau)}, \quad -\tau < k < 1 \\ \E{(X \wedge x)^k} &= \theta^k \tau \int_0^u y^{\tau+k-1} (1 - y)^{-k}\, dy \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau \end{align*} \subsubsection{Inverse paralogistic} \begin{itemize} \item Root: \code{invparalogis} \item Parameters: \code{shape} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau^2 u^\tau (1 - u)}{x}, \qquad u = \frac{v}{1 + v}, \qquad v = \left(\frac{x}{\theta} \right)^\tau \\ F(x) &= u^\tau \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \Gamma(\tau+k/\tau) \Gamma(1-k/\tau)}{% \Gamma(\tau)}, \quad -\tau^2 < k < \tau \\ \E{(X \wedge x)^k} &= \frac{% 
\theta^k B(\tau+k/\tau, 1-k/\tau; u)}{% \Gamma(\tau)} \\ &\phantom{=} + x^k (1-u^\tau), \quad k > -\tau^2 \end{align*} \subsection{Transformed gamma family} \label{app:continuous:transformed-gamma} \subsubsection{Transformed gamma} \begin{itemize} \item Root: \code{trgamma} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\alpha e^{-u}}{x \Gamma(\alpha)}, \qquad u = \left( \frac{x}{\theta} \right)^\tau \\ F(x) &= \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha+k/\tau)}{\Gamma(\alpha)} \quad k > -\alpha\tau \\ \E{(X \wedge x)^k} &= \frac{\theta^k \Gamma(\alpha+k/\tau)}{\Gamma(\alpha)} \Gamma(\alpha+k/\tau; u) \\ &\phantom{=} + x^k [1 - \Gamma(\alpha; u)], \quad k > -\alpha\tau \end{align*} \subsubsection{Inverse transformed gamma} \begin{itemize} \item Root: \code{invtrgamma} \item Parameters: \code{shape1} ($\alpha$), \code{shape2} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^\alpha e^{-u}}{x\Gamma (\alpha)}, \qquad u = \left( \frac{\theta}{x} \right)^\tau \\ F(x) &= 1 - \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha-k/\tau)}{\Gamma(\alpha)} \quad k < \alpha\tau \\ \E{(X \wedge x)^k} &= \frac{\theta^k G(\alpha-k/\tau; u)}{\Gamma(\alpha)} + x^k \Gamma(\alpha; u), \quad \text{all }k \end{align*} \subsubsection{Inverse gamma} \begin{itemize} \item Root: \code{invgamma} \item Parameters: \code{shape} ($\alpha$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u^\alpha e^{-u}}{x\Gamma (\alpha)}, \qquad u = \frac{\theta}{x}\\ F(x) &= 1 - \Gamma (\alpha ; u) \\ \displaybreak[0] \E{X^k} &= \frac{\theta^k \Gamma(\alpha-k)}{\Gamma(\alpha)} \quad k < \alpha \\ \E{(X \wedge x)^k} &= \frac{\theta^k G(\alpha-k; u)}{\Gamma(\alpha)} + x^k \Gamma(\alpha; u), \quad 
\text{all }k \\ M(t) &= \frac{2}{\Gamma(\alpha)} (-\theta t)^{\alpha/2} K_\alpha(\sqrt{-4\theta t}) \end{align*} \subsubsection{Inverse Weibull} \begin{itemize} \item Root: \code{invweibull}, \code{lgompertz} \item Parameters: \code{shape} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u e^{-u}}{x}, \qquad u = \left( \frac{\theta}{x} \right)^\tau \\ F(x) &= e^{-u} \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1-k/\tau) \quad k < \tau \\ \E{(X \wedge x)^k} &= \theta^k G(1-k/\tau; u) + x^k (1 - e^{-u}), \quad \text{all }k \end{align*} \subsubsection{Inverse exponential} \begin{itemize} \item Root: \code{invexp} \item Parameters: \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{u e^{-u}}{x}, \qquad u = \frac{\theta}{x} \\ F(x) &= e^{-u} \\ \displaybreak[0] \E{X^k} &= \theta^k \Gamma(1-k) \quad k < 1 \\ \E{(X \wedge x)^k} &= \theta^k G(1-k; u) + x^k (1 - e^{-u}), \quad \text{all }k \end{align*} \subsection{Other distributions} \label{app:continuous:other} \subsubsection{Loggamma} \begin{itemize} \item Root: \code{lgamma} \item Parameters: \code{shapelog} ($\alpha$), \code{ratelog} ($\lambda$) \end{itemize} \begin{align*} f(x) &= \frac{\lambda^\alpha (\ln x)^{\alpha - 1}}{% x^{\lambda + 1} \Gamma(\alpha)}, \quad x > 1 \\ F(x) &= \Gamma( \alpha ; \lambda \ln x), \quad x > 1 \\ \displaybreak[0] \E{X^k} &= \left( \frac{\lambda}{\lambda - k} \right)^\alpha, \quad k < \lambda \\ \E{(X \wedge x)^k} &= \left( \frac{\lambda}{\lambda - k} \right)^\alpha \Gamma(\alpha; (\lambda - k) \ln x) \\ &\phantom{=} + x^k (1 - \Gamma(\alpha; \lambda \ln x)), \quad k < \lambda \end{align*} \subsubsection{Gumbel} \begin{itemize} \item Root: \code{gumbel} \item Parameters: \code{alpha} ($-\infty < \alpha < \infty$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{e^{-(u + e^{-u})}}{\theta}, \qquad u = \frac{x - \alpha}{\theta}, \qquad -\infty < x < 
\infty \\ F(x) &= \exp[-\exp(-u)] \\ \displaybreak[0] \E{X} &= \alpha + \gamma \theta, \quad \gamma \approx 0.57721566490153 \\ \VAR{X} &= \frac{\pi^2 \theta^2}{6} \\ M(t) &= e^{\alpha t} \Gamma(1 - \theta t) \end{align*} \subsubsection{Inverse Gaussian} \begin{itemize} \item Root: \code{invgauss} \item Parameters: \code{mean} ($\mu$), \code{shape} ($\lambda = 1/\phi$), \code{dispersion} ($\phi$) \end{itemize} \begin{align*} f(x) &= \left( \frac{1}{2 \pi \phi x^3} \right)^{1/2} \exp\left\{ -\frac{(x/\mu - 1)^2}{2 \phi x} \right\} \\ F(x) &= \Phi\left( \frac{x/\mu - 1}{\sqrt{\phi x}} \right) + e^{2/(\phi\mu)} \Phi\left( -\frac{x/\mu + 1}{\sqrt{\phi x}} \right) \\ \displaybreak[0] \E{X^k} &= \mu^k \sum_{i = 0}^{k - 1} \frac{(k + i - 1)!}{i! (k - i - 1)!} \left( \frac{\phi \mu}{2} \right)^{i}, \quad k = 1, 2, \dots \\ \E{X \wedge x} &= \mu \left[ \Phi\left( \frac{x/\mu - 1}{\sqrt{\phi x}} \right) - e^{2/(\phi\mu)} \Phi\left(- \frac{x/\mu + 1}{\sqrt{\phi x}} \right) \right] \\ &\phantom{=} + x (1 - F(x)) \\ M(t) &= \exp \left\{ \frac{1}{\phi \mu} \left(1 - \sqrt{1 - 2 \phi \mu^2 t}\right) \right\}, \quad t \leq \frac{1}{2 \phi \mu^2} \end{align*} \noindent% The limiting case $\mu = \infty$ is an inverse gamma distribution with $\alpha = 1/2$ and $\lambda = 2\phi$ (or inverse chi-squared). 
\subsubsection{Generalized beta} \begin{itemize} \item Root: \code{genbeta} \item Parameters: \code{shape1} ($a$), \code{shape2} ($b$), \code{shape3} ($\tau$), \code{rate} ($\lambda = 1/\theta$), \code{scale} ($\theta$) \end{itemize} \begin{align*} f(x) &= \frac{\tau u^a (1 - u)^{b - 1}}{x \beta (a, b)}, \qquad u = \left( \frac{x}{\theta} \right)^\tau, \qquad 0 < x < \theta \\ F(x) &= \beta (a, b ; u) \\ \displaybreak[0] \E{X^k} &= \frac{% \theta^k \beta(a+k/\tau, b)}{\beta(a, b)}, \quad k > -a\tau \\ \E{(X \wedge x)^k} &= \frac{% \theta^k \beta(a+k/\tau, b)}{\beta(a, b)} \beta(a+k/\tau, b; u) \\ &\phantom{=} + x^k [1 - \beta(a, b; u)], \quad k > -\tau\gamma \end{align*} \section{Phase-type distributions} \label{app:phase-type} Consider a continuous-time Markov process with $m$ transient states and one absorbing state. Let \begin{equation*} \mat{Q} = \begin{bmatrix} \mat{T} & \mat{t} \\ \mat{0} & 0 \end{bmatrix} \end{equation*} be the transition rates matrix (or intensity matrix) of such a process and let $(\mat{\pi}, \pi_{m + 1})$ be the initial probability vector. Here, $\mat{T}$ is an $m \times m$ non-singular matrix with $t_{ii} < 0$ for $i = 1, \dots, m$ and $t_{ij} \geq 0$ for $i \neq j$; $\mat{\pi}$ is an $1 \times m$ vector of probabilities such that $\mat{\pi} \mat{e} + \pi_{m + 1} = 1$; $\mat{t} = -\mat{T} \mat{e}$; $\mat{e} = [1]_{m \times 1}$ is a column vector of ones. % \bigskip \begin{itemize} \item Root: \code{phtype} \item Parameters: \code{prob} ($\mat{\pi}_{1 \times m}$), \code{rates} ($\mat{T}_{m \times m}$) \end{itemize} \begin{align*} f(x) &= \begin{cases} 1 - \mat{\pi} \mat{e} & x = 0, \\ \mat{\pi} e^{\mat{T} x} \mat{t}, & x > 0 \end{cases} \\ F(x) &= \begin{cases} 1 - \mat{\pi} \mat{e}, & x = 0, \\ 1 - \mat{\pi} e^{\mat{T} x} \mat{e}, & x > 0 \end{cases} \\ \E{X^k} &= k! 
\mat{\pi} (-\mat{T})^{-k} \mat{e} \\ M(t) &= \mat{\pi} (-t \mat{I} - \mat{T})^{-1} \mat{t} + (1 - \mat{\pi} \mat{e}) \end{align*} \section{Discrete distributions} \label{app:discrete} This appendix gives the root name and the parameters of the R support functions for the members of the $(a, b, 0)$ and $(a, b, 1)$ discrete distributions as defined in \citet{LossModels4e}; the values of $a$, $b$ and $p_0$ in the representation; the pmf; the relationship to other distributions, when there is one. Since at some point we developed the formulas for the first three cumulants\footnote{% Hence the mean, variance and skewness.} % of the distributions of the $(a, b, 1)$ class, they are also recorded here for posterity. The appendix also provides the main characteristics of the Poisson-inverse Gaussian distribution. \subsection{Standard distributions} \label{app:discrete:a-b-0} This section contains distributions of the $(a, b, 0)$ class. They are all supported in base R. Their pmf can be computed recursively by fixing $p_0$ to the specified value and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 1, 2, \dots$. All parameters are finite. \subsubsection{Poisson} \begin{itemize} \item Root: \code{pois} \item Parameter: \code{lambda} ($\lambda \geq 0$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_0 = e^{-\lambda} \\ p_k &= \frac{e^{-\lambda} \lambda^k}{k!} \end{align*} \subsubsection{Negative binomial} \begin{itemize} \item Root: \code{nbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$), \code{mu} ($r(1 - p)/p$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_0 = p^r \\ p_k &= \binom{r+k-1}{k} p^r (1 - p)^k \end{align*} \begin{itemize} \item Special case: Geometric$(p)$ when $r = 1$. 
\end{itemize} \subsubsection{Geometric} \begin{itemize} \item Root: \code{geom} \item Parameter: \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_0 = p \\ p_k &= p (1 - p)^k \end{align*} \subsubsection{Binomial} \begin{itemize} \item Root: \code{binom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_0 = (1 - p)^n \\ p_k &= \binom{n}{k} p^k (1 - p)^{n - k}, \quad k = 1, 2, \dots, n \end{align*} \begin{itemize} \item Special case: Bernoulli$(p)$ when $n = 1$. \end{itemize} \subsection{Zero-truncated distributions} \label{app:discrete:zt} Package \pkg{actuar} provides support for the distributions of the $(a, b, 1)$ class in this section. Zero-truncated distributions have probability at zero $p_0^T = 0$. Their pmf can be computed recursively by fixing $p_1$ to the value specified below and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 2, 3, \dots$. The distributions are all defined on $k = 1, 2, \dots$. The cumulants are developed using \eqref{eq:cumulants}. A limiting case of all zero-truncated distributions is a single point mass in $k = 1$. \subsubsection{Zero-truncated Poisson} \begin{itemize} \item Root: \code{ztpois} \item Parameter: \code{lambda} ($\lambda \geq 0$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_1 = \frac{\lambda}{e^\lambda - 1} \\ p_k &= \frac{\lambda^k}{k! 
(e^\lambda - 1)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= c \lambda, \qquad c = \frac{1}{1 - e^{-\lambda}} \\ \kappa_2 &= c \lambda + c(1 - c) \lambda^2 \\ \kappa_3 &= c(1 - c)(1 - 2c) \lambda^3 + 3c(1 - c) \lambda^2 + c \lambda \end{align*} \subsubsection{Zero-truncated negative binomial} \begin{itemize} \item Root: \code{ztnbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_1 = \frac{r p^r (1 - p)}{1 - p^r} \\ p_k &= \binom{r+k-1}{k} \frac{p^r (1 - p)^k}{1 - p^r} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= crd, \qquad c = \frac{1}{1 - p^r}, \qquad d = \frac{1 - p}{p} \\ \kappa_2 &= crd(d + 1) + c(1 - c) r^2 d^2 \\ \kappa_3 &= crd(3rd(d + 1) + r^2 d^2 + (d + 1)(2d + 1)) \\ &\phantom{=} - 3(crd)^2(rd + d + 1) + 2(crd)^3 \end{align*} \begin{itemize} \item Special cases: Logarithmic$(1 - p)$ when $r = 0$; Zero-truncated geometric$(p)$ when $r = 1$. \end{itemize} \subsubsection{Zero-truncated geometric} \begin{itemize} \item Root: \code{ztgeom} \item Parameter: \code{prob} ($0 < p \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_1 = p \\ p_k &= p (1 - p)^{k - 1} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{1}{p} \\ \kappa_2 &= \frac{1 - p}{p^2} \\ \kappa_3 &= \frac{(1 - p)(2 - p)}{p^3} \end{align*} \subsubsection{Zero-truncated binomial} \begin{itemize} \item Root: \code{ztbinom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_1 = \frac{n p (1 - p)^{n - 1}}{1 - (1 - p)^n} \\ p_k &= \binom{n}{k} \frac{p^k (1 - p)^{n - k}}{1 - (1 - p)^n}, \quad k = 1, 2, \dots, n \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= cnp, \qquad c = \frac{1}{1 - (1 - p)^n} \\ \kappa_2 &= cnp(1 - p) + c(1 - c)n^2p^2 \\ \kappa_3 &= cnp(1 + 3(n - 1)p + (n - 1)(n - 2)p^2) \\ &\phantom{=} - 3(cnp)^2(1 + (n - 
1)p) + 2(cnp)^3 \end{align*} \subsubsection{Logarithmic} \begin{itemize} \item Root: \code{logarithmic} \item Parameter: \code{prob} ($0 \leq p < 1$) \end{itemize} \begin{align*} a &= p, \qquad b = -p, \qquad p_1 = - \frac{p}{\log (1 - p)} \\ p_k &= - \frac{p^k}{k \log (1 - p)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{-p}{\ln(1 - p)(1 - p)} \\ \kappa_2 &= \frac{-p(p + 2\ln(1 - p)) + p\ln(1 - p)}% {\ln(1 - p)^2(1 - p)^2} \\ \kappa_3 &= \frac{p(p + 1)\ln(1 - p)^2(1 - p)^3(p - 1)^{-3} - 3p^2 \ln(1 - p) - 2p^3}% {-(-p(p + 2 \ln(1 - p)) + p \ln(1 - p))^{3/2}} \end{align*} \subsection{Zero-modified distributions} \label{app:discrete:zm} Package \pkg{actuar} provides support for the distributions of the $(a, b, 1)$ class in this section. Zero-modified distributions have an arbitrary probability at zero $p_0^M \neq p_0$, where $p_0$ is the probability at zero for the corresponding member of the $(a, b, 0)$ class. Their pmf can be computed recursively by fixing $p_1$ to the value specified below and then using $p_k = (a + b/k) p_{k - 1}$, for $k = 2, 3, \dots$. The distributions are all defined on $k = 0, 1, 2, \dots$. The cumulants are developed using \eqref{eq:cumulants} with $c$ as defined in \eqref{eq:c-in-cumulants-zm}. A limiting case of all zero-modified distributions is a discrete mixture between a point mass in $k = 0$ (with probability $p_0^M$) and a point mass in $k = 1$ (with probability $1 - p_0^M$). \subsubsection{Zero-modified Poisson} \begin{itemize} \item Root: \code{zmpois} \item Parameters: \code{lambda} ($\lambda > 0$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 0, \qquad b = \lambda, \qquad p_1 = \frac{(1 - p_0^M) \lambda}{e^\lambda - 1} \\ p_k &= \frac{(1 - p_0^M) \lambda^k}{k! 
(e^\lambda - 1)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= c \lambda, \qquad c = \frac{1 - p_0^M}{1 - e^{-\lambda}} \\ \kappa_2 &= c \lambda + c(1 - c) \lambda^2 \\ \kappa_3 &= c(1 - c)(1 - 2c) \lambda^3 + 3c(1 - c) \lambda^2 + c \lambda \end{align*} \subsubsection{Zero-modified negative binomial} \begin{itemize} \item Root: \code{zmnbinom} \item Parameters: \code{size} ($r \geq 0$), \code{prob} ($0 < p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = (r - 1)(1 - p), \qquad p_1 = \frac{(1 - p_0^M) r p^r (1 - p)}{1 - p^r} \\ p_k &= \binom{r+k-1}{k} \frac{(1 - p_0^M) p^r (1 - p)^k}{1 - p^r} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= crd, \qquad c = \frac{1 - p_0^M}{1 - p^r}, \qquad d = \frac{1 - p}{p} \\ \kappa_2 &= crd(d + 1) + c(1 - c) r^2 d^2 \\ \kappa_3 &= crd(3rd(d + 1) + r^2 d^2 + (d + 1)(2d + 1)) \\ &\phantom{=} - 3(crd)^2(rd + d + 1) + 2(crd)^3 \end{align*} \begin{itemize} \item Special cases: Zero-modified logarithmic$(1 - p)$ when $r = 0$; Zero-modified geometric$(p)$ when $r = 1$. 
\end{itemize} \subsubsection{Zero-modified geometric} \begin{itemize} \item Root: \code{zmgeom} \item Parameters: \code{prob} ($0 < p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= 1 - p, \qquad b = 0, \qquad p_1 = (1 - p_0^M) p \\ p_k &= (1 - p_0^M) p (1 - p)^{k - 1} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{1 - p_0^M}{p} \\ \kappa_2 &= \frac{(1 - p_0^M)(1 - p + p_0^M)}{p^2} \\ \kappa_3 &= \frac{(1 - p_0^M)(6 - 6p + p^2) - 3(1 - p_0^M)^2(2 - p) + 2(1 - p_0^M)^3}{p^3} \end{align*} \subsubsection{Zero-modified binomial} \begin{itemize} \item Root: \code{zmbinom} \item Parameters: \code{size} ($n = 0, 1, 2, \dots$), \code{prob} ($0 \leq p \leq 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= -\frac{p}{1 - p}, \qquad b = \frac{(n + 1)p}{1 - p}, \qquad p_1^M = \frac{n (1 - p_0^M) p (1 - p)^{n - 1}}{1 - (1 - p)^n} \\ p_k &= \binom{n}{k} \frac{(1 - p_0^M) p^k (1 - p)^{n - k}}{1 - (1 - p)^n}, \quad k = 1, 2, \dots, n \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= cnp, \qquad c = \frac{1 - p_0^M}{1 - (1 - p)^n} \\ \kappa_2 &= cnp(1 - p) + c(1 - c)n^2p^2 \\ \kappa_3 &= cnp(1 + 3(n - 1)p + (n - 1)(n - 2)p^2) \\ &\phantom{=} - 3(cnp)^2(1 + (n - 1)p) + 2(cnp)^3 \end{align*} \subsubsection{Zero-modified logarithmic} \begin{itemize} \item Root: \code{zmlogarithmic} \item Parameters: \code{prob} ($0 \leq p < 1$), \code{p0} ($0 \leq p_0^M \leq 1$) \end{itemize} \begin{align*} a &= p, \qquad b = -p, \qquad p_1 = - \frac{(1 - p_0^M) p}{\log (1 - p)} \\ p_k &= - \frac{(1 - p_0^M) p^k}{k \log (1 - p)} \displaybreak[0] \\[0.5\baselineskip] \kappa_1 &= \frac{-cp}{\ln(1 - p)(1 - p)}, \qquad c = 1 - p_0^M \\ \kappa_2 &= \frac{-cp(p + 2 \ln(1 - p)) + c^2 p \ln(1 - p)}% {\ln(1 - p)^2 (1 - p)^2} \\ \kappa_3 &= \frac{cp(p + 1) \ln(1 - p)^2(1 - p)^3(p - 1)^{-3} - 3c^2 p^2 \ln(1 - p) - 2c^3 p^3}% {-(-cp(p + 2 \ln(1 - p)) + c^2 p \ln(1 - p))^{3/2}} \end{align*} \subsection{Poisson-inverse Gaussian} \label{app:discrete:pig} 
\begin{itemize} \item Root: \code{poisinvgauss}, \code{pig} \item Parameters: \code{mean} ($\mu > 0$), \code{shape} ($\lambda = 1/\phi$), \code{dispersion} ($\phi > 0$) \end{itemize} \begin{align*} p_x &= \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi \left( 1 + \frac{1}{2\phi\mu^2} \right)} \right)^{- \left( x - \frac{1}{2} \right)} \\ &\phantom{=} \times K_{x - 1/2} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right), \quad x = 0, 1, \dots, \end{align*} \noindent% Recursively: \begin{align*} p_0 &= \exp\left\{ \frac{1}{\phi\mu} \left(1 - \sqrt{1 + 2\phi\mu^2}\right) \right\} \\ p_1 &= \frac{\mu}{\sqrt{1 + 2\phi\mu^2}}\, p_0 \\ p_x &= \frac{2\phi\mu^2}{1 + 2\phi\mu^2} \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{\mu^2}{1 + 2\phi\mu^2} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. \end{align*} \noindent% In the limiting case $\mu = \infty$, the pmf reduces to \begin{equation*} p_x = \sqrt{\frac{2}{\pi \phi}} \frac{1}{x!} (\sqrt{2\phi})^{- \left( x - \frac{1}{2} \right)} K_{x - \frac{1}{2}} (\sqrt{2/\phi}), \quad x = 0, 1, \dots \end{equation*} and the recurrence relations become \begin{align*} p_0 &= \exp\left\{-\sqrt{2/\phi}\right\} \\ p_1 &= \frac{1}{\sqrt{2\phi}}\, p_0 \\ p_x &= \left( 1 - \frac{3}{2x} \right) p_{x - 1} + \frac{1}{2\phi} \frac{1}{x(x - 1)}\, p_{x - 2}, \quad x = 2, 3, \dots. 
\end{align*} %% References \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% coding: utf-8 %%% TeX-master: t %%% End: actuar/inst/doc/simulation.Rnw0000644000176200001440000004772215151411046016141 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Simulation of insurance data} %\VignettePackage{actuar} %\SweaveUTF8 \title{Simulation of insurance data with \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal \\[3ex] Louis-Philippe Pouliot \\ Université Laval} \date{} <>= library(actuar) options(width = 52, digits = 4) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} Package \pkg{actuar} provides functions to facilitate the generation of random variates from various probability models commonly used in actuarial applications. From the simplest to the most sophisticated, these functions are: \begin{enumerate} \item \code{rmixture} to simulate from discrete mixtures; \item \code{rcompound} to simulate from compound models (and a simplified version, \code{rcompois} to simulate from the very common compound Poisson model); \item \code{rcomphierarc} to simulate from compound models where both the frequency and the severity components can have a hierarchical structure. \end{enumerate} \section{Simulation from discrete mixtures} \label{sec:rmixture} A random variable is said to be a discrete mixture of the random variables with probability density functions $f_1, \dots, f_n$ if its density can be written as \begin{equation} \label{eq:mixture} f(x) = p_1 f_1(x) + \dots + p_n f_n(x) = \sum_{i = 1}^n p_i f_i(x), \end{equation} where $p_1, \dots, p_n$ are probabilities (or weights) such that $p_i \geq 0$ and $\sum_{i = 1}^n p_i = 1$. Function \code{rmixture} makes it easy to generate random variates from such mixtures. 
The arguments of the function are: \begin{enumerate} \item \code{n} the number of variates to generate; \item \code{probs} a vector of values that will be normalized internally to create the probabilities $p_1, \dots, p_n$; \item \code{models} a vector of expressions specifying the simulation models corresponding to the densities $f_1, \dots, f_n$. \end{enumerate} The specification of simulation models follows the syntax of \code{rcomphierarc} (explained in greater detail in \autoref{sec:rcomphierarc}). In a nutshell, the models are expressed in a semi-symbolic fashion using an object of mode \code{"expression"} where each element is a complete call to a random number generation function, with the number of variates omitted. The following example should clarify this concept. \begin{example} Let $X$ be a mixture between two exponentials: one with mean $1/3$ and one with mean $1/7$. The first exponential has twice as much weight as the second one in the mixture. Therefore, the density of $X$ is \begin{equation*} f(x) = \frac{2}{3} (3 e^{-3x}) + \frac{1}{3} (7 e^{-7x}) \\ = 2 e^{-3x} + \frac{7}{3} e^{-7x}. \end{equation*} The following expression generates $10$ random variates from this density using \code{rmixture}. <>= rmixture(10, probs = c(2, 1), models = expression(rexp(3), rexp(7))) @ \qed \end{example} See also \autoref{ex:comppois} for a more involved application combining simulation from a mixture and simulation from a compound Poisson model. \section{Simulation from compound models} \label{sec:rcompound} Actuaries often need to simulate separately the frequency and the severity of claims for compound models of the form \begin{equation} \label{eq:definition-S} S = C_1 + \dots + C_N, \end{equation} where $C_1, C_2, \dots$ are the mutually independent and identically distributed random variables of the claim amounts, each independent of the frequency random variable $N$. 
Function \code{rcompound} generates variates from the random variable $S$ when the distribution of both random variables $N$ and $C$ is non hierarchical; for the more general hierarchical case, see \autoref{sec:rcomphierarc}. The function has three arguments: \begin{enumerate} \item \code{n} the number of variates to generate; \item \code{model.freq} the frequency model (random variable $N$); \item \code{model.sev} the severity model (random variable $C$). \end{enumerate} Arguments \code{model.freq} and \code{model.sev} are simple R expressions consisting of calls to a random number generation function with the number of variates omitted. This is of course similar to argument \code{models} of \code{rmixture}, only with a slightly simpler syntax since one does not need to wrap the calls in \code{expression}. Function \code{rcomppois} is a simplified interface for the common case where $N$ has a Poisson distribution and, therefore, $S$ is compound Poisson. In this function, argument \code{model.freq} is replaced by \code{lambda} that takes the value of the Poisson parameter. \begin{example} Let $S \sim \text{Compound Poisson}(1.5, F)$, where $1.5$ is the value of the Poisson parameter and $F$ is the cumulative distribution function of a gamma distribution with shape parameter $\alpha = 3$ and rate parameter $\lambda = 2$. 
We obtain variates from the random variable $S$ using \code{rcompound} or \code{rcompois} as follows: <>= rcompound(10, rpois(1.5), rgamma(3, 2)) rcomppois(10, 1.5, rgamma(3, 2)) @ Specifying argument \code{SIMPLIFY = FALSE} to either function will return not only the variates from $S$, but also the underlying variates from the random variables $N$ and $C_1, \dots, C_N$: <>= rcomppois(10, 1.5, rgamma(3, 2), SIMPLIFY = FALSE) @ \qed \end{example} \begin{example} \label{ex:comppois} Theorem~9.7 of \cite{LossModels4e} states that the sum of compound Poisson random variables is itself compound Poisson with Poisson parameter equal to the sum of the Poisson parameters and severity distribution equal to the mixture of the severity models. Let $S = S_1 + S_2 + S_3$, where: % $S_1$ is compound Poisson with mean frequency $\lambda = 2$ and severity gamma with parameters $(3, 1)$; % $S_2$ is compound Poisson with $\lambda = 1$ and severity Gamma with parameters $(5, 4)$; % $S_3$ is compound Poisson with $\lambda = 1/2$ and severity Lognormal with parameters $(2, 1)$. % By the aforementioned theorem, $S$ is compound Poisson with $\lambda = 2 + 1 + 1/2 = 7/2$ and severity density \begin{equation*} f(x) = \frac{4}{7} \left( \frac{1}{\Gamma(3)} x^2 e^{-x} \right) + \frac{2}{7} \left( \frac{4^5}{\Gamma(5)} x^4 e^{-4x} \right) + \frac{1}{7} \phi(\ln x - 2). \end{equation*} Combining \code{rcomppois} and \code{rmixture} we can generate variates of $S$ using the following elegant expression. <>= x <- rcomppois(1e5, 3.5, rmixture(probs = c(2, 1, 0.5), expression(rgamma(3), rgamma(5, 4), rlnorm(2, 1)))) @ One can verify that the theoretical mean of $S$ is $6 + 5/4 + (e^{5/2})/2 = 13.34$. Now, the empirical mean based on the above sample of size $10^5$ is: <>= mean(x) @ \qed \end{example} \section{Simulation from compound hierarchical models} \label{sec:rcomphierarc} Hierarchical probability models are widely used for data classified in a tree-like structure and in Bayesian inference. 
The main characteristic of such models is to have the probability law at some level in the classification structure be conditional on the outcome in previous levels. For example, adopting a bottom to top description of the model, a simple hierarchical model could be written as \begin{equation} \label{eq:basic_model} \begin{split} X_t|\Lambda, \Theta &\sim \text{Poisson}(\Lambda) \\ \Lambda|\Theta &\sim \text{Gamma}(3, \Theta) \\ \Theta &\sim \text{Gamma}(2, 2), \end{split} \end{equation} where $X_t$ represents actual data. The random variables $\Theta$ and $\Lambda$ are generally seen as uncertainty, or risk, parameters in the actuarial literature; in the sequel, we refer to them as mixing parameters. The example above is merely a multi-level mixture of models, something that is simple to simulate ``by hand''. The following R expression will yield $n$ variates of the random variable $X_t$: <>= rpois(n, rgamma(n, 3, rgamma(n, 2, 2))) @ However, for categorical data common in actuarial applications there will usually be many categories --- or \emph{nodes} --- at each level. Simulation is then complicated by the need to always use the correct parameters for each variate. Furthermore, one may need to simulate both the frequency and the severity of claims for compound models of the form \eqref{eq:definition-S}. This section briefly describes function \code{rcomphierarc} and its usage. \cite{Goulet:simpf:2008} discuss in more details the models supported by the function and give more thorough examples. \subsection{Description of hierarchical models} \label{sec:rcomphierarc:description} We consider simulation of data from hierarchical models. 
We want a method to describe these models in R that meets the following criteria: \begin{enumerate} \item simple and intuitive to go from the mathematical formulation of the model to the R formulation and back; \item allows for any number of levels and nodes; \item at any level, allows for any use of parameters higher in the hierarchical structure. \end{enumerate} A hierarchical model is completely specified by the number of nodes at each level and by the probability laws at each level. The number of nodes is passed to \code{rcomphierarc} by means of a named list where each element is a vector of the number of nodes at a given level. Vectors are recycled when the number of nodes is the same throughout a level. Probability models are expressed in a semi-symbolic fashion using an object of mode \code{"expression"}. Each element of the object must be named --- with names matching those of the number of nodes list --- and should be a complete call to an existing random number generation function, but with the number of variates omitted. Hierarchical models are achieved by replacing one or more parameters of a distribution at a given level by any combination of the names of the levels above. If no mixing is to take place at a level, the model for this level can be \code{NULL}. \begin{example} Consider the following expanded version of model \eqref{eq:basic_model}: \begin{align*} X_{ijt}|\Lambda_{ij}, \Theta_i &\sim \text{Poisson}(\Lambda_{ij}), & t &= 1, \dots, n_{ij} \\ \Lambda_{ij}|\Theta_i &\sim \text{Gamma}(3, \Theta_i), & j &= 1, \dots, J_i \\ \Theta_i &\sim \text{Gamma}(2, 2), & i &= 1, \dots, I, \end{align*} with $I = 3$, $J_1 = 4$, $J_2 = 5$, $J_3 = 6$ and $n_{ij} \equiv n = 10$. Then the number of nodes and the probability model are respectively specified by the following expressions. 
\begin{Schunk} \begin{Verbatim} list(Theta = 3, Lambda = c(4, 5, 6), Data = 10) \end{Verbatim} \end{Schunk} \begin{Schunk} \begin{Verbatim} expression(Theta = rgamma(2, 2), Lambda = rgamma(3, Theta), Data = rpois(Lambda)) \end{Verbatim} \end{Schunk} \qed \end{example} Storing the probability model requires an expression object in order to avoid evaluation of the incomplete calls to the random number generation functions. Function \code{rcomphierarc} builds and executes the calls to the random generation functions from the top of the hierarchical model to the bottom. At each level, the function \begin{enumerate} \item infers the number of variates to generate from the number of nodes list, and \item appropriately recycles the mixing parameters simulated previously. \end{enumerate} The actual names in the list and the expression object can be anything; they merely serve to identify the mixing parameters. Furthermore, any random generation function can be used. The only constraint is that the name of the number of variates argument is \code{n}. In addition, \code{rcomphierarc} supports usage of weights in models. These usually modify the frequency parameters to take into account the ``size'' of an entity. Weights are used in simulation wherever the name \code{weights} appears in a model. \subsection[Usage of rcomphierarc]{Usage of \code{rcomphierarc}} \label{sec:rcomphierarc:usage} Function \code{rcomphierarc} can simulate data for structures where both the frequency model and the severity model are hierarchical. It has four main arguments: \begin{enumerate} \item \code{nodes} for the number of nodes list; \item \code{model.freq} for the frequency model; \item \code{model.sev} for the severity model; \item \code{weights} for the vector of weights in lexicographic order, that is all weights of entity 1, then all weights of entity 2, and so on. 
\end{enumerate} The function returns the variates in a list of class \code{"portfolio"} with a \code{dim} attribute of length two. The list contains all the individual claim amounts for each entity. Since every element can be a vector, the object can be seen as a three-dimension array with a third dimension of potentially varying length. The function also returns a matrix of integers giving the classification indexes of each entity in the portfolio. The package also defines methods for four generic functions to easily access key quantities for each entity of the simulated portfolio: \begin{enumerate} \item a method of \code{aggregate} to compute the aggregate claim amounts $S$; \item a method of \code{frequency} to compute the number of claims $N$; \item a method of \code{severity} (a generic function introduced by the package) to return the individual claim amounts $C_j$; \item a method of \code{weights} to extract the weights matrix. \end{enumerate} In addition, all methods have a \code{classification} and a \code{prefix} argument. When the first is \code{FALSE}, the classification index columns are omitted from the result. The second argument overrides the default column name prefix; see the \code{rcomphierarc.summaries} help page for details. The following example illustrates these concepts in detail. \begin{example} Consider the following compound hierarchical model: \begin{equation*} S_{ijt} = C_{ijt1} + \dots + C_{ijt N_{ijt}}, \end{equation*} for $i = 1, \dots, I$, $j = 1, \dots, J_i$, $t = 1, \dots, n_{ij}$ and with \begin{align*} N_{ijt}|\Lambda_{ij}, \Phi_i &\sim \text{Poisson}(w_{ijt} \Lambda_{ij}) & C_{ijtu}|\Theta_{ij}, \Psi_i &\sim \text{Lognormal}(\Theta_{ij}, 1) \notag \\ \Lambda_{ij}|\Phi_i &\sim \text{Gamma}(\Phi_i, 1) & \Theta_{ij}|\Psi_i &\sim N(\Psi_i, 1) \\ \Phi_i &\sim \text{Exponential}(2) & \Psi_i &\sim N(2, 0.1). \notag \end{align*} (Note how weights modify the Poisson parameter.) 
Using as convention to number the data level 0, the above is a two-level compound hierarchical model. Assuming that $I = 2$, $J_1 = 4$, $J_2 = 3$, $n_{11} = \dots = n_{14} = 4$ and $n_{21} = n_{22} = n_{23} = 5$ and that weights are simply simulated from a uniform distribution on $(0.5, 2.5)$, then simulation of a data set with \code{rcomphierarc} is achieved with the following expressions. <>= set.seed(3) @ <>= nodes <- list(cohort = 2, contract = c(4, 3), year = c(4, 4, 4, 4, 5, 5, 5)) mf <- expression(cohort = rexp(2), contract = rgamma(cohort, 1), year = rpois(weights * contract)) ms <- expression(cohort = rnorm(2, sqrt(0.1)), contract = rnorm(cohort, 1), year = rlnorm(contract, 1)) wijt <- runif(31, 0.5, 2.5) pf <- rcomphierarc(nodes = nodes, model.freq = mf, model.sev = ms, weights = wijt) @ Object \code{pf} is a list of class \code{"portfolio"} containing, among other things, the aforementioned two-dimension list as element \code{data} and the classification matrix (subscripts $i$ and $j$) as element \code{classification}: <>= class(pf) pf$data pf$classification @ The output of \code{pf\$data} is not much readable. If we were to print the results of \code{rcomphierarc} this way, many users would wonder what \code{Numeric,\emph{n}} means. (It is actually R's way to specify that a given element in the list is a numeric vector of length $n$ --- the third dimension mentioned above.) To ease reading, the \code{print} method for objects of class \code{"portfolio"} only prints the simulation model and the number of claims in each node: <>= pf @ By default, the method of \code{aggregate} returns the values of $S_{ijt}$ in a regular matrix (subscripts $i$ and $j$ in the rows, subscript $t$ in the columns). 
The method has a \code{by} argument to get statistics for other groupings and a \code{FUN} argument to get statistics other than the sum: <>= aggregate(pf) aggregate(pf, by = c("cohort", "year"), FUN = mean) @ The method of \code{frequency} returns the values of $N_{ijt}$. It is mostly a wrapper for the \code{aggregate} method with the default \code{sum} statistic replaced by \code{length}. Hence, arguments \code{by} and \code{FUN} remain available: <>= frequency(pf) frequency(pf, by = "cohort") @ The method of \code{severity} returns the individual variates $C_{ijtu}$ in a matrix similar to those above, but with a number of columns equal to the maximum number of observations per entity, \begin{displaymath} \max_{i, j} \sum_{t = 1}^{n_{ij}} N_{ijt}. \end{displaymath} Thus, the original period of observation (subscript $t$) and the identifier of the severity within the period (subscript $u$) are lost and each variate now constitute a ``period'' of observation. For this reason, the method provides an argument \code{splitcol} in case one would like to extract separately the individual severities of one or more periods: <>= severity(pf) severity(pf, splitcol = 1) @ Finally, the weights matrix corresponding to the data in object \code{pf} is <>= weights(pf) @ Combined with the argument \code{classification = FALSE}, the above methods can be used to easily compute loss ratios: <>= aggregate(pf, classif = FALSE)/ weights(pf, classif = FALSE) @ \qed \end{example} \begin{example} \cite{Scollnik:2001:MCMC} considers the following model for the simulation of claims frequency data in a Markov Chain Monte Carlo (MCMC) context: \begin{align*} S_{it}|\Lambda_i, \alpha, \beta &\sim \text{Poisson}(w_{ij} \Lambda_i) \\ \Lambda_i|\alpha, \beta &\sim \text{Gamma}(\alpha, \beta) \\ \alpha &\sim \text{Gamma}(5, 5) \\ \beta &\sim \text{Gamma}(25, 1) \end{align*} for $i = 1, 2, 3$, $j = 1, \dots, 5$ and with weights $w_{it}$ simulated from \begin{align*} w_{it}|a_i, b_i &\sim 
\text{Gamma}(a_i, b_i) \\ a_i &\sim U(0, 100) \\ b_i &\sim U(0, 100). \end{align*} Strictly speaking, this is not a hierarchical model since the random variables $\alpha$ and $\beta$ are parallel rather than nested. Nevertheless, with some minor manual intervention, function \code{rcomphierarc} can simulate data from this model. First, one simulates the weights (in lexicographic order) with <>= set.seed(123) @ <>= wit <- rgamma(15, rep(runif(3, 0, 100), each = 5), rep(runif(3, 0, 100), each = 5)) @ Second, one calls \code{rcomphierarc} to simulate the frequency data. The key here consists in manually inserting the simulation of the shape and rate parameters of the gamma distribution in the model for $\Lambda_i$. Finally, wrapping the call to \code{rcomphierarc} in \code{frequency} will immediately yield the matrix of observations: <>= frequency(rcomphierarc(list(entity = 3, year = 5), expression(entity = rgamma(rgamma(1, 5, 5), rgamma(1, 25, 1)), year = rpois(weights * entity)), weights = wit)) @ \qed \end{example} One will find more examples of \code{rcomphierarc} usage in the \code{simulation} demo file. The function was used to simulate the data in \cite{Goulet_cfs}. 
%% References \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% coding: utf-8 %%% TeX-master: t %%% End: actuar/inst/doc/credibility.R0000644000176200001440000001072115151412373015704 0ustar liggesusers### R code from vignette source 'credibility.Rnw' ################################################### ### code chunk number 1: credibility.Rnw:14-16 ################################################### library(actuar) options(width = 57, digits = 4, deparse.cutoff = 30L) ################################################### ### code chunk number 2: credibility.Rnw:52-54 ################################################### data(hachemeister) hachemeister ################################################### ### code chunk number 3: credibility.Rnw:208-214 ################################################### X <- cbind(cohort = c(1, 2, 1, 2, 2), hachemeister) fit <- cm(~cohort + cohort:state, data = X, ratios = ratio.1:ratio.12, weights = weight.1:weight.12, method = "iterative") fit ################################################### ### code chunk number 4: credibility.Rnw:221-222 ################################################### predict(fit) ################################################### ### code chunk number 5: credibility.Rnw:227-228 ################################################### summary(fit) ################################################### ### code chunk number 6: credibility.Rnw:233-235 ################################################### summary(fit, levels = "cohort") predict(fit, levels = "cohort") ################################################### ### code chunk number 7: credibility.Rnw:263-264 ################################################### cm(~state, hachemeister, ratios = ratio.1:ratio.12) ################################################### ### code chunk number 8: credibility.Rnw:271-273 ################################################### cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = 
weight.1:weight.12) ################################################### ### code chunk number 9: credibility.Rnw:302-307 ################################################### fit <- cm(~state, hachemeister, regformula = ~ time, regdata = data.frame(time = 1:12), ratios = ratio.1:ratio.12, weights = weight.1:weight.12) fit ################################################### ### code chunk number 10: credibility.Rnw:312-313 ################################################### predict(fit, newdata = data.frame(time = 13)) ################################################### ### code chunk number 11: credibility.Rnw:323-336 ################################################### plot(NA, xlim = c(1, 13), ylim = c(1000, 2000), xlab = "", ylab = "") x <- cbind(1, 1:12) lines(1:12, x %*% fit$means$portfolio, col = "blue", lwd = 2) lines(1:12, x %*% fit$means$state[, 4], col = "red", lwd = 2, lty = 2) lines(1:12, x %*% coefficients(fit$adj.models[[4]]), col = "darkgreen", lwd = 2, lty = 3) points(13, predict(fit, newdata = data.frame(time = 13))[4], pch = 8, col = "darkgreen") legend("bottomright", legend = c("collective", "individual", "credibility"), col = c("blue", "red", "darkgreen"), lty = 1:3) ################################################### ### code chunk number 12: credibility.Rnw:353-359 ################################################### fit2 <- cm(~state, hachemeister, regformula = ~ time, regdata = data.frame(time = 1:12), adj.intercept = TRUE, ratios = ratio.1:ratio.12, weights = weight.1:weight.12) summary(fit2, newdata = data.frame(time = 13)) ################################################### ### code chunk number 13: credibility.Rnw:366-380 ################################################### plot(NA, xlim = c(1, 13), ylim = c(1000, 2000), xlab = "", ylab = "") x <- cbind(1, 1:12) R <- fit2$transition lines(1:12, x %*% solve(R, fit2$means$portfolio), col = "blue", lwd = 2) lines(1:12, x %*% solve(R, fit2$means$state[, 4]), col = "red", lwd = 2, lty = 2) 
lines(1:12, x %*% solve(R, coefficients(fit2$adj.models[[4]])), col = "darkgreen", lwd = 2, lty = 3) points(13, predict(fit2, newdata = data.frame(time = 13))[4], pch = 8, col = "darkgreen") legend("bottomright", legend = c("collective", "individual", "credibility"), col = c("blue", "red", "darkgreen"), lty = 1:3) ################################################### ### code chunk number 14: credibility.Rnw:509-515 ################################################### x <- c(5, 3, 0, 1, 1) fit <- cm("bayes", x, likelihood = "poisson", shape = 3, rate = 3) fit predict(fit) summary(fit) actuar/inst/doc/actuar.Rnw0000644000176200001440000000503415147745722015242 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Introduction to actuar} %\VignettePackage{actuar} %\SweaveUTF8 \title{Introduction to \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} \begin{document} \maketitle \section{Introduction} \label{sec:introduction} \pkg{actuar} \citep{actuar} provides additional actuarial science functionality and support for heavy tailed distributions to the R statistical system. The project was officially launched in 2005 and is under active development. The current feature set of the package can be split into five main categories: additional probability distributions; loss distributions modeling; risk and ruin theory; simulation of compound hierarchical models; credibility theory. Furthermore, starting with version 3.0-0, \pkg{actuar} gives easy access to many of its underlying C workhorses through an API. As much as possible, the developers have tried to keep the ``user interface'' of the various functions of the package consistent. Moreover, the package follows the general R philosophy of working with model objects. 
This means that instead of merely returning, say, a vector of probabilities, many functions will return an object containing, among other things, the said probabilities. The object can then be manipulated at one's will using various extraction, summary or plotting functions. \section{Documentation} In addition to the help pages, \pkg{actuar} ships with extensive vignettes and demonstration scripts; run the following commands at the R prompt to obtain the list of each. <>= vignette(package = "actuar") demo(package = "actuar") @ \section{Collaboration and citation} If you use R or \pkg{actuar} for actuarial analysis, please cite the software in publications. For information on how to cite the software, use: <>= citation() citation("actuar") @ \section*{Acknowledgments} The package would not be at this stage of development without the stimulating contribution of Sébastien Auclair, Christophe Dutang, Nicholas Langevin, Xavier Milhaud, Tommy Ouellet and Louis-Philippe Pouliot. This research benefited from financial support from the Natural Sciences and Engineering Research Council of Canada and from the \emph{Chaire d'actuariat} (Actuarial Science Foundation) of Université Laval. \bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/inst/doc/actuar.pdf0000644000176200001440000005524215151412402015230 0ustar liggesusers%PDF-1.5 % 11 0 obj <> stream xڥY$7 -:K@lLf89?$w3حA||$5pw}V^_8v?@c}8 y9]8Yk Toq=ԟc6zt>5oVi~,!T]P>*WV=][y\@ƃc5c]TR>wiڂW(.q%ןޔIdvGMXvB.4RGR[ Z䬰yj\> ."!{f! 
5uۍqn,I^'Տx'GOnHtdVphf+ 0nj=D+X.7>h䵍 R@Ǩo3")_y r&.wdPAs(/ʹI}ޗ3$onwcyElJd'P>9MtREGu)D ~4Y2O[ugbxfFo#O1j07*sfԠ@DGL+&5%#p<)SՇ r;@sqR`J4u-Lzoh}r(0Ȯ])|e^0滪 mi袢@(d;)>q+q K"XuQ9TN{ꫝ( ¡\D-Y׷qC%'5[ k晨)%'}A2}(ᯫ]byȓ9H)w&-tĺ]l0k{ GERbByZ s ԾӊW|::ұO{$kr*/7f jCJ Faj;QJ"MHxCB'WG\P#pNvwR+.ǽo^^F.+qs%\|*Ȃ"TܾB LcBƴLW) 1J!L.,}j='sg J)&uEG$>lۤeY*&`sTYBJYTyw֞XBI*R(Sm7i$Gjj:mK2JGߥF.S,w(V}ivnC_ U뮲vo n:uljmoM;QY[a9^rHU{Pg-9y*Bj*#k5pޚ\67n,H endstream endobj 20 0 obj <> stream xڽXɎ6+MWqC@nN0K~?Z(JY# ZX˫oN@8O0]'_~ig]t~߳w%3$ޮO//R"6hlC.0Ք W4 VSFҩN\8x82= 4fz-egxgOρmW}ۺq=A\2")O>J0 w"L_g0at!?Ȗ)ȖDRӡ)ڄ'@8 !0e{=|m& eߣkt7&"5,kf2ix :V]qFAL+9;P= Cx.FHu)*}\ΪLg$o<=4)̐y3y;L3DmV0^%Cg.8*2m wuzlKC|\Oa>f  L/޲e1ܺnB.˄GK;(!9?d0|PP#%bޠg 00Gt6+ lqɱӒFFC (֙Q Iu8igʛtwQ,. ȳbMKZ7 yĤ9 V1%1:mU,lہ<5T[ o ~Sph¸9B7@|fӝ.]YQƓL',.E'۽Ά5*rWz)Xہhv77v6 .MV#,b9DJE 㔭@p]N\P>0Lz" `n3%z<+o\e m6Ӹ`Lc]aYDcQy`8O1lF]TU1Z%K+B6=mPbw[^y+.{+hsZ|ȂGJ)cN̽U4G>uIDnkr$l\W{dxl~Q$~ d|YU.nߛfܱ6=y\`4' {⾽䳴9;_7*Ve adž]c"ݕK55|0'Go.l"uJzqZEvidtpCDz$1m9*ɮİc賴But{_)x [\oN;ǐ]Qo|VgFR9-m5jqIrN.W0^*5aGQ>3]D9/͈zz/E|T%TƼUۧ\] T ~&y*>F%*R)+-G+˼A1MӀos&6.$BX}n3ՖO+ endstream endobj 44 0 obj <> stream x}Q0y(&V"Xm]V ju(lڇ4&3'?/RV0}: [`;8u};9o{jF5Kso9YGgsrll[oĂu':ˢ'8j?c7ayWu~X۫@y2G8ǦݨxKn-7I=P~HQ}oC"$!c$#hi!P)/R Hw$b2NCM]s+BGyǼ,U)1ܕQ4b"yWx쐌]˄ߑ$~Ẻh/XD5I RTb"R!ȉ paHrA{aDDÄJN+GthAvu݆L38v<{k5/X endstream endobj 45 0 obj <> stream x]ok0)cڂ ºia{ 5 ѲOla ]<ݤTs߭tJ8>F]4q͡0Esᷛfk}c!PQA~5mvm;hRUicGv&+KxboVՉM #Ř34:9Ir -5[8ׯ~%(p߅ODQH4#Z- i#  ""T777m?rkDKJQ8ZhK:4L>\q3>Zm*[W endstream endobj 46 0 obj <> stream x]n0 d߼gqs]°jtgf{4" |yh e^=R$C;a496/i0Bvds*TwAsGW q0Lv$޼RJͿn-aFhitD3K@-һ]`)Z+4J1Xsi" B<Cv(PQQD[" ýj ,=Zf-;m/n!yGK}~G; endstream endobj 47 0 obj <> stream x][k0y^\AֶCإc1Ĕ~&ǖр߹=YΕ$ܙV`I-UekF9QEop+o~얫Ǣ̿ʟ_;-?I1ޗ[6'^^4h|]\: M$I@H٫w\hYxpwSHF P MD[A'%ɶ?i3iZ|sg}8I)CZzb蛡Wt4g#=!H RF mı/v*xQbsG]EG- ȭYq6߿_Tp{".  
endstream endobj 49 0 obj <> stream xڝywXT 9GΘ"IQ{E, aDK P Ĩ"jkrS:fs/_=|1ϙ^]]kgĢ~DbxZ5SxFy; #x X,XQ&hl3 ~X$3z=H4IDFǑ]fco~3'2xB8x{z/VGGL2a0XnJVy[X:y[.VSo maC)>cbgR)%%D\d,US5r@̀/V 14;3aPT!-vV 6$mșea"#g+̧Wwo`o=|p_9"vAaAf Yމ v͹?Tv'?CdS)7h#mzפ t+!u{ݠ;Pur,L#bEܟd.JPъAV 4]0ftOb;l5SکUPi@4U_1IOf2uM[T!90b40?k|5 3U |i^X%5n03܍FQ\&)5..̧iAH2zIӱ2Z* F< `̯ nⅉJTfvLNh9-rMtv _2]V=K֐H"^I`%O6w?V/ dpA)O˵3p=35?| X.\ GO(,MOJӋT8ڢq3yYi/jpkvhW|`zT87B?Ŋ S RjՇTr_ q\4(`킌zLO<յ܏K er:qYd[ 1!նmJ2fϰFa_U{NKf; aMDHV "a$n0a)2 bD7iV6ۋaGwvU ]^{yd=o%-Gp.D8hn+·(N,,DwJʻFlmυ~y1a(/0a7maw>;/)n2W| kD|zt Y ٪bbhlB[z Qx)\~)ra'zkM=hUQ4Ln^V0d(р{5>>fN4qe?cAqтMo=#yIW+$a# Q @A\&J{d 3ژ8CN. ާ~dqwCFc^rB~~QZ~MF,XQ d&9,׭B+p<6q6M\Iʩ(ˡ1MLWO  0ZL -myA0[<lY)G’$ړ:u[3l)ʿk?[/ mmb~G=jP[ALf ig &&`uֿt7.)#f2'} ˍvƽF39WeW܏l5ϋa骤Шۂ47 Z([b{Ɨ ݌suB]U7%~{ La.ۯ{h~M_d艉n W# e`!⮂ +q5"8]ՒE+t5`Z%$ܷV']n}7DOv'C*z;UA^hL ¯[M,Ș_9Ofa÷ݲGL:Yqi#/zmf%^N|1CA&O!>haY%rW*)\^ 8$զ*;k(T _T!U+0rX]}=Ѽm%Kb8`X|FrU)qZM\(Mg25/#~Պ}|ԩثyG1ŁĎ3Ce .;MZ\%e K0L[D3/"J'Y';θ\Zsz=v!Z/7ۏ,eo^D :Q]q3 ʨ1holj[P_/.Wq!`:q!1 W,p؟qʭwxRW0&KsF# Ox﵋`USܲ7e3{%Vysps3K^.l>|}"⢪ӢMk7ܕS`|f%ʱбQaQuŵWJG 9qRw \w: mg t 5EDKcs"n>°*@I;Bь4=۷"*lx2SdG?r? <}Z/}pD7tbMpom{v{YG/j:#0wrƼ+0r'YLT;-Ϧ=_pBӖ_+589dN^_{$F;.Hj]Lt!]TQVj/q q)^VEv6iFY$̼9 l=0Oӵ&YNp7-a-ZSMd3uo`hڭ ev7KNB7_*Cbcöa?_jZpP#7Р0e/BfOjux;0WB%wK~Zb\BRz:&r2ISJUktߛ+k[d Ri:կa 3*)_ӓDkٯz{/#}֥W1#}C{cHk&hEF ;/mAΙOgjQr7}t;ԍ,6V H˓rG2(-O,8/w5TnM15v1BF8',Oh%+ھ˶Agw3\CK>켜E)͈1z4?sW 047Vv&f,6.Y@I)Mm7>S 'S!;?zBfG>z` xw6sӗ|O-3xi( VaW53P-I͎.b76(_`X,|6saUAk]qh [r PSذ'ћMAkcg02ȑ몈ƠC5R~ӹođnW[k X s.a Q[u}G;WPߠ,nirU9l0Fq,WD`>,oʊI}Efia.y? FY۵7oBsckE{jIN%dmGi+L+izͦfC[]+\g):;S 5EDAJQih v'۩}^p}e-цR|A!~J@kA|?RM#ΐKȺӭtf7"쉱GgE,\N{氜^f'2&}z peȩw9/1'ˋuZ2_֦ȸԤ4lKV0lŏ Iե* s[ZF k0LD/S+R탉 ŀⅦT9L(pGRc(,YM1bτ`&(]W(.mjY%QtteR"i"L#9һK't2ҏ9am^4<d C`wC_uz]F2UU4 :6 LJ`7'zS dT+% yAD01#VW@_Z;4߇4rK=)9;It-aGn{!3keI@˕ܦы0<+aӪ+*i͡5Tivdhw!JM1zMZBEIAnϕH5#0L 0! 
J[_ITaېK 7Կ#}jp;5RG#ݫİ0(yƸ)^CkZC@ɔNCd*#%oKm2s уfxieeZdP[U}tP&%PCgcљkqI[^UXaPJKgW*߭!8xv988~H= p1/< sX0\xQ}a'&Kz3>`0k2\Vř"Ca9>qՐRuq{xqWMVy1/ kCZ _TLI"Dvύ6R7 ^oԈ䩍 _2,/䵳Fl/+FNTOaĊ$OwVw_rm+GDž20~i ⾎(U6 RU%F'O #'u V }K\}{ yxcGŵF|4<-'oҦdiͣU:>i?0{¹&k")3a6b'mpQ|QYZ9X_lك5 t2!km\`S狻M%[橳ԉn {'mO1,qqfP^݉p;NYw+ 33jveDb`1ك`8 e~f}ENk'Ϊ+G]޸l8L)?x-sEmOorNǻ R[WUŭUX>݇JdN56/*?o_5s.gg?<\e3izް6ehbDTh16b!y`rQ?QK[͐r+g`xDŘe ]X&§%6utǐ wa$X,'zmxG~Heň N> *Ȅ+D =!ߞ?hMmQQBiC)9$ 1u2L`ؗwV Tk{O34:i%w(Ir{s=N45Pܵ0حrugӖo S{L@b45mYpZ~cc{vmMu4}ug1Rm Ai6=H4cY|<–, XH#R>@?HL endstream endobj 51 0 obj <> stream xڛ~܀}=D^8afEe endstream endobj 53 0 obj <> stream xUW XW. tuiV"AT - 6F]di@Q" %qItht4.8)L2彩~U9{=URD**}ܷ$i}ɺEꘈѡTqZ KED>Na&ښ22mtT&m+3M"cj?4AofzfIƏH%2 '%*I$of:TvU"t-^boZ%bCUc*(^cT0FrM{ujS]|DHP?! b#b$n6n_|DFZbox%?pxm$WޫhT..9/s_|Óp YjINIF;ďL#Vd/34uFPHlkhgѰ b3*ClOwhJbJ~>,^@JO\Fo%a7Rs3ʴ41ѽ F=;GcVF2L.MPO3n^ү6AAO:qL/yuTS= 4 0ca׽0N Mf <=PK7Q;,UB|PAMZeώD)6<2B-wf9%b ōs(EU$_⯷4 &5m>O(ư$Z_ʞK%݈גU:2Uz> mt^kwkW yYE&~܍ey{"E`al+`M&T\!C$;,MՌ!w̥UidՇe:UDEFD>k)Sd,`KK3rRc#HKe.45} J@k@iEzLs'T0Qa1.yaœ<{Ċ'l]*x)Lsc }WOOyoe/}fpƀi5wՔfTQegrçާ֮ R`C.[.UG 5DX ^.Sx=P6B9u$ 0wߕ_PYT[ZgsW}M՘נ[5=SQ.%f+:\ĝSR}~݉uBΫkÕΫk'L,+7_+2@ZGgM'.bL'Laуʌ! 
(2/`q՟z32r0AĊ=dNO5z?4EP\),q1ڼPa4dbP(/I&a rNnϩσ1Cx;LV-9IJ hCf/|a[B.qRP]\Cu6$쉂ʦ0TQG멣 ,s7 56c6_ol-%eM~.Dn̨x|9wgcf/ΣՆ¶Qؙm<.jg9Ԗ7:l1Y1 + j303X yAV>rȀb͡0O _ܼ۱L"sb2fU}bAٿ+ sZxS|cSPi$`^!؟%M&`wf؄`=r6th̝,ؚf%\GhbFZR//іv^̕\ꊳʎcz;3&rrݯc>dK#j~˖ D> \{0ePľkξH8}+#i|JKHM0Y)po215-3oJllO_.ȿn` zwmle9;odڲж+^kYFW\o!9(J'y7;'EYhz=| &mL *jx/JYh:e+QsW klZanҥ'd""?*vI3S}#Hvy.@0C]aPߓ[{ i )7`ڣM# Zq}e{8KC|?b޺z)3_yѾxk5?>*e6iXf(V韒d}R`!a6Ւc!q;AnWs`x[1 endstream endobj 55 0 obj <> stream xڛ&0bN I& endstream endobj 57 0 obj <> stream x=W TTG]"v:a NPq5G"D k7؊41$g<6YL" a3WA4F%8fWXTOz[{բ ([i1ۤUW^":_#5 Ti ]cɳsQ4N?Gq|g^ЋSE xٖn4[ry[BBO2u5˚g}b6L-vk 6^o1^fgmV[)-lZmɶdriɋ&{1y1j7W >0a' 0RPQha;a &a.D2!Jb aJX- kx!A#^gg[BX+,n{C"4iFh^| &jhK%_)N:3\'d'LJNл>}r}J؃^=F{~憶6XG5G;鬱R>n 8VulNũzYəsPTj|[_v4G k Lv(]AQ~\|_u=e{:mnb¡BBbd`ݬ\=X<$b\(}"znkЊ#Ijؘ`+OLA.~0mV"t^Oy7э]X .+bISPv gEeFP4Hm *y5V37u҉>gowz="A~s=mڻЉ]"E?­/*s,9@zNf!u$}Eք74qA?+Yo܄z\uaKv+oi]Deos(6ݧ!cF_|e=W0ě;JFp0\m(tvMrSc5gJb6nʜO쎲8>pȩEU;L<b6A]$Wg6s&fc\WtZu)F.E^!;Kgb:Z|L C1T耒&8c,[< َA=b]ιlGd[,쏒>=~ j<=>%&LPu%/;$egMlD+;]Hԫ]vR-K^K?DT;ԣpdsi8)mXQO|jƦwM8M=ʝII)#7`k}>u-E ;jHP泡b6ih8|_tH,SC*5TȬj;P؃/nm7T8Xbg p K9x{6{~Z6xM#)Y&Z| x)w6/>\)$Vn^ mgݬ5>Fɻ-҇cqL-5ϖm=6lxyM&髜}sCMGqxBE¯VdZn**[Q*Tv iqDvqe1ۅtJxy+ek㾱B/`};s_3, yYHHr9zy>)_xw@;.b-\ϖ$?|Ghr?8C (2ܨ g)e 5R+l KUFԷW1 /IH0%f1mE?6` ɀY|:Lhj$yI.܍R{g6xhWb vz5IZ-9 ڰUù]J>s 0 aʥ (Vj$HXaN)b[CT6l۬Ef s>oW?dQ_sW%M[I3,+@V/%斝uW7V"uI/maSNh}Nry%lTJgi\G|Y.'{"޿#3o,zo$(ۋiy1N,+1kZ^A%%e 5h^us? endstream endobj 58 0 obj <> stream x0A}"" endstream endobj 60 0 obj <> stream xU{\W3PQ2LImV-ZZV,T"'!7 o/*jUuZպLlu=9w{bN3k9{Fs}vJx[[ ?_ތ{rDS)y? 
x `z JaUЅ{u u(( _#T07P*`ExMWDi6-ǐ딴5 )h6ˑD- uס⮺6CBOIL2Ÿ2ϰSE'a:DD׎_:kХ)4eH |u5hvI#Lf[YQ$ Co4eQW+d30/V9P8j%%5 e;)ɘ~Xs&Yi Na?JN4Aw-d*E\[ਨr^}k &; }afYߪSdY ȭi!:`-%Lͺդ׵ܑ˟ D13;X3T>߱hNSED!%52zdnʫfY_:0EÜmHk,ҩܔR$R G;-< ҩ؈gU}8$4-F;tT7:~:If0W /cu\w.w $C|}Ɂ˒bU˨ja4Zh;5}g›#)F{uހWx*:}0SB֜4DYk>1,-=%!VXYQbg 4-W [ M9F;9^]#B rGor ?Gw7'5 7!_%$-YntE0L9 6*lƇ%V['ѴZv)H]0 \ )ߌ} W7: r?;/Piq3 C,W[04O^X50m?U2_]gI?:<1GDoP3laDv4~X6>TykeΛ+(uA+fh}5aF`m}ݍj&q\K x~eQGҹ AVb/LpUԖgZ,9ivy?/Q{M,0dwl1%I#l9zu]LtzC>GzdBͪxʺ\Ugi&"WU73L9v:m3²&57iqgڞ!^'N >`ɨ]2YEy5U x]Bv729f;X|z!).\i`_Ǟx44)& z$x)PrHrI2sͫ/,\Opv\=>˳ l*Nv^[|~[h" q3 0z0{ifq 3#x\^,O-?~s)T~jl߫QYK +ϣ03͸]m`n_{{GN uo>D 'i<qȹ~A>@ԃFS\l׮6Y?;bzk$%6.Qn5 C44Ķ^bn]}Ia$S9eN;s _^uDx_ I6L9N,AI&FfjܜTu<~a 3ٴ!tښNu粒v?3oU(jӮx^Iђ~kuV;s.JQ.er+m4[ـDP Bz}?1.U@wYk!~ R4M,F= 8Io봆$:HND :qY w=Vh{g6xـ@1JA^eqn,F1S[m۹a X_SrHU%d%87ۑ,Bo*t0'/wG/,!#2 * ع ݂ & % endstream endobj 62 0 obj <> stream xk`aŪpX endstream endobj 9 0 obj <> stream xX[SF}ϯMY%EQe def0aɞq&/%&f3%,ik 8z9ŔfA1D" Lzm`B8LBhvhHGq%bҐ 9bhBYcװ iPk,!^QIf`rAtLvy| A3'd9g5Jቅ-GHЈ}3yKlw7=h,CMbt\*W8=@GPgmgyL:&i};Y 83aUb(M'#e !L7ҌH&:$eM ,H!xU9=uS'x6m3Jڃwczv1Mgey*xBq鿫*ȥI S%`]peT4R/|8Rh etbqnr_⺨l簜ɪl #OL<z6VchIĠm僤nAbYiP!BEII̓HZoeĞD=YPXO)1?Y&D(6z,k6c♵)f"@x4M:KtXOg5f0[6\bXBYdϰ'v[kX]5 vXԋ{//9Ws9ׁs9w`&9Kjm&=d55dW.;:3< +ԀUH1o1FuƨNɴ{vi"Ęv_.Qk]"6z<*VUvxR]J[FGm:UyƊZ aNNȲFD[fh-D'<<#\/.pzo9$ӯZ-+㬘엟'd~\O{gQ_+\ WK]Jb(.gӸ4X.??v$n2  ᤎ;?bʢΓ\?CnAkۛ5QQYLUm(GCTѲ=L(b871<ԓE9o1s&(Кr AaM_:WOi9C*XFݲb\~/[che"ÿ0b@yb>ॐqO c^'?`.巣c'J2=XӡHx  @ba7D%B G?,H$OCm1mĿ [)ؗדhm 6ĊtllcGB \ClZ,e)Ύj) endstream endobj 63 0 obj <<8bfb84c54248575e39a268659769f179>]/Root 1 0 R/Info 2 0 R/Size 64/W[1 2 2]/Filter/FlateDecode/Length 170>> stream x%η A@wDJĐ u ZBBRRدIhw/$ B0!`*UZ9aawQHAb> stream xڭYɎ#7 +0| } rҕSBQD=A0SmJ").O?8nӯotϟ_m=98b:8c3cB"]F>;_&Ȍ"=E;3*sɢ*iŸoV[H멦Q埈AfZacy!K$)Gjb- sbm vjY҂9UKvQ4@AE?Zʪ#Y%3S[NLP8e`bL k}8HY}1Yb$ʖp !umimnm/֟sƟ0^Y,1Ky,tJMƻ (-9n -9GO5JaL v~Iۓhvz9f6%%ȯnC :WSYvZåhm&IV˔UwuK%KtP/;EA.B{hxl2-R/⌆`1g]kÇ9-e,y}#{<즠F'e$ |WWU,Y i=쒓@ c D{Pr AOԊb@ܹ4eX; Yq$[. 
-aթ&uA XA/_NDt@<^zd\KeK`j]-;~U""7 YH"4lq;M 89B^;Fr0pjL- lcy*xsӄ؊*6V`57-`݀5~A>Fp,)CPNRPE5Au-_nj0>[[8ɒtSyݑkqOn/P}TP92LHj;7a)f.]Dc)WMf }`xjSG6Hn(!/ I@7qGkv^Sr|ot߇ۻ=a` 4 3Lq)wPۦWԧWݩUƍ@C 6K9Df\#ϞDQۭafNihP]l4QzD 1Zu=?(;(Txr %ֽ"&CfBC O>m"i @ނ]6BuF( |{!|b$c1ϔlM]9ĹxF+0=gj5N]'RqaB&5lPxղx\E$8O'w5a=T(ymYѴKHQ|UtFWek}X!-> stream xڭYˎ6 +WՋzYhtvvElEE]zia_")vW&?ɜ~_Ԝya?&< ˝Iv;_OBH+|S< JH)<NJRW<ޚo!wvJZ(%>S"/*JæSb)<93h:QkkBF5gׯθVݤ`BJy4ϷbĨdan-)T ٯ^Q UC)ca=%A Okk{i(Ќ|`ѷe}bjІ?z,@n)|B,vlTI,'(1~:ZT-zJ[-^x矌6ݟPt^קC8&TZЧ~Y/>~P%hLiߛT1NYۊ42Gf֣FHu\q! pr!nȏ1,pA%F؇~L6M$TY&P϶y C[X1mB`; `Uut\A1Yx ?cfwyaY3n[kI{Ee Rc!*in9R׌"%&\Z.i?F]5|@<.'wz?AL3 RZTCzӈL1\v^ӱתq\*W;k,ATT1iw; ju &Cc+SLي^H 2ՃكvzŠl].\U9TF(MT>CU~rN9,C JvjkLgav4t 5“;OC+.øѣ#[.bǎO Sy!B>}҂-V9.(=9 endstream endobj 51 0 obj <> stream xڵ[ɮ+fXdq /$ dY\O٤M~?yeK/x-Qb ,bǗoӏ (ɌI[fqƺ 8g8pCp{gN!i gz9 )tH4kOܖnt=h_qX3xOL b&A'f$Ѳp3{YO3'q,,9pX(?*, Ќ4e /X68fC]ϙ"48CfQ IJBW %%FPUEkȨ01З 's~9d魙^@M1E/CwrZUb!6O4!pp=ePX 6I\S$:PO#=IzZcfz)bR\pȒ6~KGrfd.~V .1>l3 Cܬ{KȐIչ:d*^FZd}UIk il[U;A8Ҍm느t#Sa" ۱<575̦ykĴ ۦZw1@#jTcw)!:4/72gY(djr\JF%Z$0nȤJxo1e: `G:Gm!sk*2D_9 J8dSwW;&(<A ;-8D*͂!OHFTcO$ݕJyZ.hH CiIJp6F7֧t?55}QTdãQHhBP#xU࡯c{jNaBD>,/HAYcOqt58lm_Zc$I%ע $rDy^xS/DӗL$Cqt&ʺgΆ`1%GYG*@b,D[zC,}E/iXں3ݶBS!2Uhsf1h0\ǐt{BIT|D'tmJ̩|* ((ƫ6A1չJ*bPEdE[ʴؐ΃ZH)2\[I5" ,ʑ285ֳYzhygnF*̝O833L9V27nu}* Rc|HHW 5)1 Ě *2Ǐdwh h( i(-ȪDU!ۨk2khEdn/šetjaQJVkrhMZcN(Z+x!ץ~SCݸ(ro[m%~&'ȻVft)`[)!Rv2 ٶ'yfs 5=erXkF E^烱|\Pvh{6>p>S#å#ȼТ!x>$GV-ֿ[D @4Kfإ+qgfOH ɔc__!mXdns` XpXE8h ͔@tkL t[foEn3q)Ŝ2{sRu b``*DRH瞦g<[ZF瑆Kz$tT ԚmBdqGh$vg'l~,0wu6$gG&mRkjTh>- WQeE#aJlCG(}㠖햪ilJ9Eqjc|JM8'm+.+p))Gb[0@xntI,Ĉ;EW/!? 
S-jpu)Ai[_N> qhgsRDG ykӒ`쳮瓖S_zRy:r}٩6[(ô1ʗ!#¾鋢ez 7S#MϏVB3hNG 92^ MFp^^&4Q <]dj[48~n8 \{e!HgVUY؏dq+L1IQJlj#·|xo5]D}RB9!xC'Li[!oOEny(J:6*eb,g[KʧBc9"C;HmNg E(l7 [D Mn61WjUE թ sy 0@ݡ0)NS q?ܿ816c jkvb5Gޟka{dù[ +]|"ڃOflʘqǸ9#y3//ٸ>ɰAX|N1r 0lⴻ\W4!2ŧ|FGdMuh6bjC`̶anny2饽 )<5Pyʲ3Na2&1q[*;fbLh|F8ø2FF*t8֮CY>e__m:I_:^]NzniX؀CT N/B}Cl!N3?&%iemP;wM)Z 4 Ф)D!K`N:%W8c؞mFB fnG1~ Ygu62X/ e%9AfR7}ușpWmGW1HJ;w{Ɖ}o^0 yad+oT -N);!N ,Ok6aj ZbW ΰ'Dѩ%ܯ90>!o;]YB5e:^[Y`L-5#S!`3u Qgm^-38c֟ N.3GDӧY7^Rۛl$Fofܒr7cn^bw-Aߞ(*ߺ_? endstream endobj 54 0 obj <> stream xZˎ+Ⰺ,>LYt.f)Jæ,7ݴ"Nzݟu׿nwȚl3u8sLܽxӒ=ŧX"^n 'xEdxIpz'ևI/N$U韯byKr8͏h ^ &{<Ȇu*#SOĞzlSJb7O"L#o"U"ӃA&gT[aO!b|%MagC~ox}3gxxckDRcd8jR2oݟFO.du?:duo W{bP\z|{++rT;ZX/ jjڲ 0q5.!Zb|0a~;Sߞrs{VkK@ 0Θ\ Y!aRzp-mqM85Z&hRR=.<\=%j ƃz /ogdrqoedNZ<)*&(3!zCZj7XMjy  axԚۡGB 6:D>wXc_Rҍ_RTߌ1u$*EНD;sCF~t6AZV/SnxZ#,jR5蚧gikE`U9-RF$e[v $5֐ =<'jQX@n4+԰\MgKPVHVi6uu*zNH^&/5hq+H 7 'V$Q엖\`; 'P2U0&HƂe@ \\rp?S퉑j<5@Cآ= @Yͮr5Fu?gzD0Cƒ}%<%B7x74Pa4Bbei't{{F8,~uAunئ7Cps]Ia\ҶupuҬu tW[zQڨq γp> stream xڽɮa70!@2@n-yK}O"Q[U˷M` HrJwO_?*˗ q]N~U֏} ?NhYtYсW?,̏GL{0;2 ' pKbä93A)C?䌽Q@b@yr}} fqv*؆_o .*~!~ 8"ʿ/f총` zPx@B VBp %LEeٲ r gZTbFWU_ ݠ" Fh&Hr- NqF;9ڠ?Y|t`*zLp*#R Jm[zep%H hÔ=\& j79qza]x:a59Fx3AMG@, 챳%)S2\`S ZP5oS8f1~jft߈0ňä%w6qF`3fhas̚:p%/7Ax9&1D8 !Wl#!p)E."SǶ:9MQv_6Ȏv9?OiltaD~Ӯd.pmޙxf d`]MB ZBGϥ*pV爥0Ou<(.@|Zg`Ӄ+;~]U.=R \ۂkЎj!YB&F=tot7g]+zJŇ y~?ёś=DӓS : kre9t_Ek/}^Mll`)Xdv.?$~_KLAťKͶU;)ѺfL ~H/~Ԏ9* ws5C&,3fMXCƗ\4Cwc8ֆOTa<9w'.JPRRP]' @2< /05*: Hջ5&Ab! ht Gў0\з(!*Hkt8i2z@Y$N!yBlLϋxg)4]E],]rO5{w0"AVѤTl*'ʳqI]I3tjjv%]]V=e T A:Mmչ tik12*r]Fd$r.ߋb/2dA%0ss"KROçf nCu#ͷM`qJer.\\+\[EXhʈE9 ' \ʇCN9C+mdq-cqEXgs8v!>=']t6Ff!;PA{-}8vοVI*I A0zJ{)|?H0VrʶcLSlјﲂ}$q`1{?dTXv9g(IRn".t~b@tdKc]|*DVRJJa)扟d{eΜ̝`jpi*UV.65TMv*wo :!.zۛx@}~1p4~.m*xipmxI=>bUD=^4ky CfI87VʛƝcSݥuWɶ48$󓡂@%4A0߄̵uwI1131/mVN3&*4וTj'fSKQl¸iך7[Qs[}h=kMxd*[C5]1 tae:1@.=GU:v};ۤR qRZzZ-=6ޅ>-2yTIؾx²;a5;Jn:;xM8ЩJ >kQ˵`l<4aIЍä&W}ʠbվUts%iĞǁ>Q߶{{~/?iDܦ_/OdWYij}Xe [<6i;VϒsM̤^T`: u7N:m̟r;_q =}<ݳg6n%͵L37Oҍ}0ܮeG.9ԶoGدI?1{=.u|q<(/ -. . 
S~VN[vr6m`_b`IrY*~3x尛a٘ڧW~t{#fbiJ DW ?,.oE{\l~ {gJ' endstream endobj 65 0 obj <> stream xX͎7 )V%0|(-oEKsH.}" f,Q}( Y5_tFY]  ^,h>A_wr/*Tc^)T/?Տ$Ut٪U堝KN;qƄ>B)8j;c.+Az·'jKcv~2?;t-}j*-G+dC׶zpjxx0繍)baߢ4`S+,$ h$,4mZ20̸ ~l=+ h\)#(KY8H(48;<ioV̉n, )CO 겯G&}R`/honk@=V|||Ŕp䈐!$5XEЃ{y|C$|.4FR=vMXY !ʯW ك@<\3`D JC1[=@%8 U Ceж`>dAra]VtA0(; &73%uQd?avKc\5FT]Ķq*I8וٚ,kLtӑSXLXU]* \6{Dw[*DH5]=բ|Pmܙ-"=-AJڅFZn-!ve)KLU鼔J ~ d ol3&#CÑ ޾Ɗ̖6|-bǍ>%}fBuD:FjV΍ui5x4lvp7fW*]Q(j42rTe܉UNQ:; NeORWIfkٶcpfBn uSG$W`ZQ[&#;;Qrl61N 07ƅ2Mdlx2" aiLDf5(_P%G.{ro#,&"Bx3#5@g1z5#э[FaeXUvFn@ɹ}~Y Mݴ;U?j?>3CĢRE,?]K endstream endobj 68 0 obj <> stream xX͎6 )VG9hvnEL.!CI,IlK$Q#> -i + a{Lɀ*ɐ@LZZ72x=jE2F׻ˇU/-_@/#"~#$Z\nkDΊ?'W ?-۵9`s{R@/ ڣ RID:Ϲ>mJpOR1M!Y{mu"k~-RFy{qת: 䯶iiAņt/O[ ΦMD7=L {3YRD&c HmZ;(p a1@0`"%=\X@yÁ&`y28T2`pUX+/m 5HsZ:'}9J+&U2u2s'vȉ;>1-t{ 8>~H%W)aq$$X"dQ2 Gmi66LђE}8,œ"\K%Jvs(T*nE\p·4#WlM=5#xdAly݄v# <6qlz`(i] Zfi[^63柪>ptQ7Y( <⸗69U85,k[JꁦV icX e:"vYmarp"Ѿx$Uč7]ˌT2e֥L 胚C5Er ?\tVt#mėn)^ #,|4°n`%WJ{l˚D֎xiS[.ZYi+YWL!lלqf֛:\vdwKz(O\lqYfqS-%<^L -|à6`^[{Or)YVBKgDau\`E>} U|V$ endstream endobj 73 0 obj <> stream xZK6Wa@Ç6l9v.%?_Ev,f8b^f:dk?/`M)?M*a:P CO3Ma`vwN! 
-]&F7lX/7xxPK5e5`k&7n6*+bZͣS|9!{=x8s5^ f /g d*O;G uzZd98OnQɧšk`롯}~?!0$ȃD3.Cua@UPw<~.F#Bg~]?c!; x ;81[3ڟQq-aw-kue}G]:wmb\9^{|zӮפrwE\x^s]M -%هw](%ºʣD&pǹXWf amkeTc<\s/K?}\̳?oZB1[+6 Sk .;k@|NKf]Ut=u== A'v z5` w}YZ~揿T31ވTsgk?JtVҊ"M|8vtBJWA%2ws`#xYz,>ȧ ٹ(2G๑*#\-D Do$\ i K݆m6+l#9l)NO.cNSQh!KHGFJiTe,}!.kI:rKA:[Iρ<3G"q3mvףy5orF{ٛ?Ir;gw!,4"Sg_~ 1 endstream endobj 81 0 obj <> stream xZK6W>ÇMޒaziͥCriz((rP?Xry>/!(\^p9(sY[,5X\X~Y>T._|j_ׂHb hz/e-H|Z>|tK<,ObP$QJ%O+%cR>KI-hux49ߍڍk4xUeƞ{G$Pr?dh&|"BkB#Jw@Zj@yL>2mgr:ѿw;*5;-nE="ۯ |[=2@mI^,lK Xcּx|^:ܛU"zG`@2+Ai:l޾džVfK\"* l`eRնRu(;3 @"`<_9]pG< OOoc|Z&ƪHi%Ѿ>qt'7'0G UMJ8kS/ܢx[ _%6jɣ>rRҲuZ9R^[TV4>] nɿ+qoQ> *1d[TQ|_G~ k9hix,ԻLaa4<{t 򎎍s WhIɵ=|E֨푨1͊C=cyG8b$\#ys:Yǔ9:XQtS!OM돩OQ$!iTf1>\ j(tͫY(9 \BH7r6&i"sY{P&K, xHR (FpN =!c0; bRsQBž#8WC$b5-b %z T7g9D!|tEBFэe~JGyăH:[F8یUEA1ƃmaR9'\$l8#;W4)q\6NޡJ1؇Cin8} Zi(E=p08t`ݫ Ρ)HbKF=ÐWߊj&YWm\#pXovDu'pz ጰI~G;:,+ O;6>cQ3MҸEfax+>dRNւtJKƽlvۄUN tTCR!1ͬ{A5h{`ij">FdT +wcfضnIﺧTǺ6*1N0ЇqƼ·(=rhUԔ'-lgܭo*ĵdZ[b:aWb燒|UډǺ+i=+ryUgDSgFö]i0Mz))PߗuTY@AQ痁DiL'8NNK ͚‪m) wrh'4As|MVvLc/Id*=0&Z2_^D){ueX:=4!Y'>)'4 Ս=|L59fzoC=[Y~ 0x 1/ (0@M{ן2UJn'UBJ 6SoX^ |ރ_^X!5o0ƷxMR-e2=}tq endstream endobj 89 0 obj <> stream xZɎ45 ]$qV$@ q />NlgEHh6xM'?z t i甌Cr@ ;Vx3_+rc~]'琍n{tu=+Hz&Li:6yt0+OGSW:wavVl&)^rK\p:xZ *ciU(lf}eZK="0ە̵tp) sҐdPDd(ޢ x؃\*B"{]j+[^*"AYL+A` Voqyڄ0 )*F L3Føzgt⌶V,؜T~c F7>ub+GAQ6q{UAlVȁ 橏3WMbFڛe W2}&Xc׉rei r_}/ԜT*1>9@I(Xщ?TgOAsMiԚp0Oar^X纍Ld%F#q# sn'HV3x\kcJl"<ɨLpw8k,"Cs0IcO ?ʭVGKGHeEK0f{EA;G Oeonh)0 ƉyB]j!l]p1$=d>\ HSu.)$DtPq|722*6 mccXE0Un fxX&/| (r9q,Z*+Mx a$ʨsUzN$U@g~bnw3$143)7>WM>Ci|W\ [wRD=F͠u湄C]oX*'Qa謉P m+tFx`et5=c_)X\`ձodHKlv靾ڬ||/i,ΕYLh@ ̻ +F1X'ti`Wk55V8ٴF^x\<±r_ .IK &v`ʺE<`i_g Lǡ&|.v)ZC^1)R`L} lC`~ ZZV<ז]mG챷.,Ź+L.\߀[7w׭OݖXga#aJS>E0~z7'g?'Of~6Y~_mʺf4Gw||w>Vot{]0wx? 
#7bt{[ܧg|^45[w|Xx)%_'ʦ~ǯ endstream endobj 95 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 96 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 862>> stream xڭVO[1 "G8`b;?m&1*8l-ӊ Ӏ1佗Ъ_c;>;NМ4Wp*û`ZkOop~aY hNs5CND̵@1g+ -GqZo3 ːd`o$7TyNޠOz-F˻-o$.$h 4'Y\=7ݢD~Kҝ}}Ze2Wzoz/Kl|Ѿ{Ⱦw3ɁgSERuz*zE2ٸ`o=8T p 5ɠ ԩrЉM]Sgݑ~"V b)p??+ t;j}(Rq6B3h3s?|u>;Hqo/s{4t}zJe&Ix6c $$Ǟ(Yujڬ8F=$T t@l_)e',%+N O=ZSthΨTL8!9ʾE'(+7(I8;(UB;Ř!S8[=[K:&$afK̖|T؎<9.b6zHif+ٖZleZ6)Z7YfA[zd%ۤYtlI:14|c?8\S,;:L֞VI'[|ĵdFތzbEns#+Gu0OPFVDPc~msʐRbAMPon)`i$N] ϳ]PQ:(=[l.}/fr,N^xh9msy}Z?Wn^zA} \7c0 endstream endobj 97 0 obj <> stream xڭV͎1 y qGz@JzC:mg/NN] Vg;y2`$_gcy9>c^): .l"3\X ̀!_?og+`v!dHfwqeǾXؖ /q;@rrfc|ڽ`$̲9wbU?6T7lwQGߕtWlnĘK]Umi: if Ʒ@u Ṉ_ꦪ)'YNBe;S`/=WX5!% \2vJt]Iw ǺKnPIj\O_E݈j "-}˴tPPVp`Kd~*dq XD4߸@l-燛Oq=fHv-čL^5q2fȵ2YDn@ŹXFsaPd,R)E2o[iztݩZIҡ*2A&eĂװk.˩`UBƮXZ7Ih㵭Nx3jZ(oHteYY ı5gG'JwIl_wgs<蕑 ΡI4mqLtٳIl^qeL4og(cTNs8t-7i.p @ZeugΊ,ꖵR2$7-vV4c} 78JG^E-Ѷ|[?(Q#kD^kb ijCuK!%4?{"o)h(:oLN  endstream endobj 105 0 obj <> stream xYK6W%oC6@oiV r.!w(Y92 3h3CW /wxV ~wN쇓V׶@?H?>-߿G2f!l}P!V<Qy~-Y%zWpYYm_g'h,Ɨw`+JIk_Nw3tЎ㝰s[Du|9-3QÓʻzr\ y"k!aRm RG&~h.ޱ-N<#oEK/ xu%q`BLu[h[>f"@ffbDs| S'uڊ;R%v/zvp*Ux E۸c񃇰'ayg$ܮ7퇡Kq,ܚY}vۅ{<˗:+$J~w=Gn7md{Y \64}o!;uQ8le$W> ]/s~$cSxק_rkDsc-In[YQ6=[إ"潿E ʥGbXjjBRR QxmP^tmOf&1ؑx}̛3:3{U9ю++"V*|ay,7T]ڝig_>3JUb | ބ`DoĊ 60ˮp@[ϼ2D{*bk&V!f}eF9XQ^IӨ#xeS bPFt3QyȊlC6/ib7ZW^ i 3cHxԄ$ Pq6lp+G’%5"|v[[ee BUPDE܌ QJoP+5 S4~3^\7VTD{wh>׊U-{KpvMPTWΨ\O9`\dZ Z8c 7xIp2 bGBZDUů8ɴk3.,y>hncαנ$i)BETtf-{iZe9kv 
plFN/gYohFLicm߈%j;Q+[|^Oqeܴ1U9zg<,,dBޒHV|kMc٢{y 0Do;^T5Af- ]˵,|yv{%^ZVWRBP%F pشW^2c..yLpb B-cB;1s5Vt:ܙwiC=lp٪'د&6$JV9b_9j t6~)3C d(|S6[ ꆿc-v$-cGH`u}:k zq:K1/2˪nZ1v܋[2o5vY$ g,/Ji/g׽`$L imC,ós(qk 0k5`|N`ƽ¹Uᮜ1@wi |T!,4=a]āVW~#'S hF]J-{;m!*Z0ŔXc= endstream endobj 111 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 112 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 870>> stream xڭVMO1 ϯVBj%J= U-TJ_' X}7Asb\_é>82gkgǟmp~aМjb`>9&3׆A7Ɯ=YWP[F``g@!ɠHn6PyNQao'=q F˻-5[ I n-Cqv%'Y\=7ݢF~Kҝ}}d o ;z_&}}> stream xXM6 WD)J ak 68NEI8y]Q$D|H>=|`Qv'=~LY 7!U$ܰqDǿ56 `M ep"A鯭6ZYK$vi+nqcZ`d֍^g2u8W#]Gq[ 96C5^.Yvv[tvShjĚrLyؠ j/Q0*О둫mb*6՘|w:T,:ŧi޵!ŠEE/btuκ|C0&'9ވbd),e!;#jhO;H?EB uyvCpK:"" bcIɎb1;˒9עh < h'9&ǪuaU@#֞ VLdR&ą '|4G#:貉&lbZ5'衭=sK>-Kή*ɕl֊5,6ybX2F]v&X&,vծR-[Uϳ`[$Z4v<W&Ioȴߡ/Zjkk%X&9ojܡ`i^jrnsGJgSPL+J'>\賔_ Dc}R"F%IBm-e=ZMl.P]/Ni 誤m97QrN,18lfWhL"v`k~ j`ϫKV[kI4.J3iHKߐ g悗&xjRo&]m-l'&A@+4kj쓹t;^ HJR:*Y.DpKBإ{. ;#sz! 
wFxp-\XY@?,W ,phW|uz:>Ծag9,.3cg E_dA X>I~47VLLL6^~Hr> OLR{3ԽSJss.K/Fiwr]"˹ˮ1Bi^ >#WKSc-cA\EC.b|~c|!c}p-͔CdØ9ONl{2ҵ, endstream endobj 119 0 obj <> stream xZɎ)M;C@nvЫ/S$e|1 @BtعTFv<:!NSRDcŎA#i>)yNeE[ivFv`-SE_QTx0#bJesr `@m#l Moj_oxMrt]躲+4S lw: ʤ$Jpa"h!@@< 'P0r<9A3$9I]`" \#+l|@S.&;uN'<=atQ^42(=88LJ|ر1V+cYv+ $jjPbۚ"T+ld'J~BÊ ޖC'.~~NETTX$LrqjӌdbyS(p)BDBB'mcR> ӅS[0.3k+6T>NEMsBC MTwc(Spn\Y) sxbh/-N+Vb3)̶)h?/ՍFvp:=Fd B|뛵.h)^{뺚u=}29JUțJ:r=UMxu+iΈ@'k\!JVvtwH\sD˝9- aHpNH넩O)sPRZӴwDuז/*(2$H椤yx !_ TKsLkx_*-[a7M^~YГR-?N86%B*(gLΊo(Oꗔ\TM1%S{oMlca4 R|u QhC4Y놔ѦX艌׉aDfP:-ƜD~Cpf*BOAA-Tي Twsp35ǢO@w4p4J0L qҰR'Tt$Ez΀\\uݮrd9r^g Dx!0}?뻈FKy D>Kjwf(_Eȅ ^D*5 J1bƫ-t{gXBs%d' Iy3 _5f{OSVB.9]JPiZtH.*V0$}UЮoQ&o*!PK1na<#{Yą}Kj5,Ck׸,'jIӁ@UML</6ryzݛVL[ }/(x<S:Z \ /tR_AvX,k38Gx R{FDD-#ES Z>= tR";H}TBp $eڢ,K-mSWjrfkZe[vK^ K̂.a-SVi^"eh1OfL7(nZ}#F&'ZM+39]yNّӢp;滮fi&WR$m/x$67*o9ty+ǐK^ĸ3$Nn$\Wd$ƲIIVOD"JAfKӂ% 7<gCSˋn%8r0.1*uNYm^w̟BctlS;.Ө szV NY/ L!{j$xOd|}L,LVy 8j6kV~ aAy=[obLNLvHM d͜вֺ+DX٧[oU)*?y/Y%~1Fo=ano;Q%nbwJMxmb[uT{JLaK,=Pnm_8CZB>KjF}%ᅬǕ"Jco𤙂zHJșb2oSYZZ[6N(ChgZYSaつϙ;gu2xm~ .9+.k zCպ66UE*'(b{A hm3Ww-C#6y9r,T9S]%*ݰ9S>qExq/nd;?b endstream endobj 122 0 obj <> stream xX6e8|"@b )V+7qa7CtAr΋_ ?AW/$HɤY8#m qQ+R<"sz3b, x[%lh AFJ)u<2B<KnK&` <Ֆ vIq֩CZdHaUGVjceT1-1QG2Gp!Z9Ap!{um(=N(',Z>kf8N(nuަ  kS;T ): O:7*!;'#e۠2#lTSgSJqSZW|(S.X\(B3nخGqvU 2UEp')b.4!pB1L3'່Td5et k%cOĬ/BcƬUf:ٶ*i_%"$*L_`ragA:v Z;E8DKnk+$ WZwiϪk";aE08\S6ja~9a?gY$dNn ~/@ZmAuC"3Vĝʁ"۵#(r;E+ȍ%󆺜)#ɹֵk##}';vA]4gQ"9хmo/6xTC}49#0 #j`5"ۗvkoe)m,15m,_ftnf׫g|J.p6|$R-CƸuHHKjGMoYTxKیDۚى/%O_`Xv6U>׻xsKʳ$^7wiotǑiR▫|͗y5-}n1&`\pEkWse~./ߐm9^Nߧ'^m^Td\\\JE^J3x`8Q5S}C͎m Yn/.VES&{3̹ƭsܬRϵ5 AII1>i3rF+\F )wpMZ#ɿNUm\9E.] 
kd;lct*5WwK¬jjR݄|;ӧ Uc&0}bթѸ D5J%_~3s+)}Bb1 '8,.aK?-gta( 1`,۷FT endstream endobj 125 0 obj <> stream xXK6 WD7P@oέa${v/%%Rg襘1b!><}̤L_O7|׫_U9S1i&P1 rE*כ'錭*;|a N?NOq*d J.feގ^krxE<ǺaD dyݙVj 5 r3S[m_oMq%@}91.Ν?Fl-KY7P[KQ+33gاAuD%bUދ3$Uݶ#ˬĚ뱏w}fPO s[ Ix]ʭql%>I?n[N1FBwˢ3 liR5OSM_;x蝲._o1/Iɱm;@':⅂gm9zzh`7Ƥ.1@^'ڂ lB*\2NYGH9E〻BB1+-eT[L @j5~Blۺ[-RA}`*ɟ嗹/b%ꜛTSWM@v)֯!c`R`iGOED/Y ɱG6Jf0/orhVO⫲V-{1&rX-l߫g6ue>$1=Ø!\f7ӹ1EM &%UZ;SLt:vLā\BCq;~.$S>86*3N-o#_otn:c αSؚMYii⎍3;+ؐ :J_ƄZ;,D*9lG,V AڒU$ecRX&jT,vցrFd;ש$dJ4$4?HOykN\ UB>*`$*%JA3OS'u9-EgɯXQ EB-W fWAtޫB-ݕP>X :J1)O e#ֵ&{|IօAVˇ[Mmչ,Hʳ0EęIyȯq煝^lY]Bʂ':ZLlcz񸦒1wԁ8ȜD{:]N7]&_A`̙Q}Q'L5vnطU뙛1`a#jDs7:h[8"eZZSsjePU 5A dCxdW-Ȣ^epI;ߪ &)U.[&CEp-įS2@^JuJϛ\Z0m 2jڒhc*bXW c'mINvz{2wcQ01eHRu|1Q zd8<XCnxU>?趙-!jƞR֪ pxaCj aDOCZY~u.`V*V.x-ۿ/; endstream endobj 128 0 obj <> stream xWˎ6 +VHIh!@ E0G.Þ)r#OO^ {dYd/ 3n>y4?V -Tmy^sD| Vq\GK%_]=8[b3C;=Gew 47c w@=@ '7G8cKIH0eF즼}3R} A & eZ4>ipˑ׏P-R~zdt0[BD os/b@7:FzbPT8vWI c|6N/C?ͧNZH6 LoƄw<ąWOY7JëJOx*"m?J'nzLl=uRCv2BWmPXtk~5SeBGrw$PB"USGOcX mJ.X2rKaHaz-|9B ȚS:]ŭXYK}c]!Xn 6Ygt2juR#t.kK2RD\Xgp=%'snC*GK5 *#h{/n !Ӯhb'm!Pe[WD9Jv | g%v:uzX&d[Ɇ&Ҳ+"Em*-]ncEQ׉"峗QVm~s`0ky. _[E5ϷuQ8|YƷi#T@懶dSQD{ >BhD魄%1ExQm^⊑]z: w뤫}5K{ IzJD=v_;[HPM> stream xWMo6W;3  t hnEe=o(JmɱE|>3 $%_u3̶ hd&59J(Y1^ucgߏY8Q+_Yѯ?_~3kW -Tx]!K{fH.xv.nVܚY>T$٬}մ&L5wq%JЗ8ֆl<` qb¹g>?sFcT"Xo֝e7^#aH6u\ʄ(hN'oDG[]u9Qzώ6{%^H 3L~G 9jNqbY٣{8 5|]_V:j2,eɘ]f7Lj*%gb_ ؉ȡ Шo|ܙ&P1gW/zU.U?79xc~&e.\ձ"S:\8QlpZՉ!ZMafчH9\EO#[1) @jʦ.D!Cgr3 nu./ ] / 6,lHG91duJHFڋ>0 WƽJOM߰Hh`pK,5WBߐ8_@Ftu~ۨ99紤4uZZCZ?T9 'rnC~+OK D+n<9#rxȟf]=lȖ)~9?]HJN֩2N΁sC7,}$q7\(3aꊲo}n[ لBtH}n?(<N#sIRnɽos'x:R|p(.Éծvny[,16-KlvS*PQ.fRy.pwy) 訃)bP+?&FgT&hyL+($ܡF>f 4Fw#y1pf{I"H@u!q38[6JS%Rf{l]E)o2zܑtSNxۿ/ + endstream endobj 134 0 obj <> stream xXɎ7+ͰX\[H|{uے<6D\j}j'G.#~?=!GP=_ϵ.#O! DtO`0F? 
ߨ:h+c d_m굝o.%)>erϩJ~ mW̞sr2vgAd$>B>UugZ晿CJig%eqG̐4q 4V#B~ek7#U&>S--!\)x܏gOm}_ٌC^b`ڧ qI.t$5 ~^X3՝u;Ӗz[")x9]j?*j)vܬՎ Y o% dvʎ<'ٛgM'IX :\{e%mFgKɗ-!o::z):zAi.?h:RݺxeJMn=z'^Z~m:M;~MUd>0bKƙ 'yqbЯu> lidZStr|-ܿ_ Ð: bkO!dlw@Mc~Zh)zђcX_s F+]8I ]f"\6^EMׁ$,C=]a}SB|U72J}eyQ wSx }m!Pk`8b @Oڵ4`u 2֜""pQ/LfuF[-PET*lb5nڍ%g)8.gXIn_ɓġ{}W_e!;`xdfUpGiٷ갱,vGl(~3Zsr&<~;}8_rFtv=bPcߍǨGyTb{y?~L7~zO75[TYly߁_{&Z斂)*^j}oΡJkdlj䗹 yQrc-Z=.Vtoj}^<S3x ~"DBm(WI[؋EVS3a0[) J&oMfC܄0K.vNT endstream endobj 137 0 obj <> stream xXn#7+ͰU\CI-A\29H*um˙`F6wz6 d"3/_ׯg/d2QocN Fa|9'].O(x҅б҃^ ")?Öy%yoY|Kڞ/:6 t6`;յN4I?غdZqf 9sbx2u91! 1 6FOU9ӡ3:O =oXg2bk崲iYm曁( TRƊ|59-͗ƄAri8xm|Z5͡5JJYJ74mJDGb仪FO! SOdq_ Eo+0qIjDC&TARX[t duDpc^GC!_LAt[-񠬑(?5ts5R fR /V ;^$: Άhf²Kľ@zUD>&{'ZǷ*6o*PzPg^;5_NƟxM.O=HNhJ4q5%&-刴xCìBpE!DuFzw7s"{nةnp?NWaKkwb2aZQv(-mjnlڮ۴VF)>[wy g >ltB4 fH,_aX?Qrh `_X'uIMsԯAP~U…HrRA'Ŕ Eh endstream endobj 140 0 obj <> stream xYˎ6WZ#RoQ w,&Yd5J^.vW-[(<<$MzNO?맿X#,U ).o>`q]ktHNwwjSʷtʝ/_>M(\R&p0Y[I0ɵ&[12 -FeeZK?0lwe'y{yq=^f/l8S݋~?A^ē%~A!E)\hޖթ:*L܏H+]+;*䎀JGXJd LB\h&S)I^S_. F<4wmZ*h lzm Zr_w7lsY 4#W6of3S\s ? R2ɓ-2hg4胲T;`!(G@G}j|#AIk]vp/P@a7f[Lmbqh2biߩf1>ST ;kۛ/CQ' O0Gl/»b (HMeφ48-Ejq+'еʶebɌAÑ򂣌TM@= oIf{%dڑ,?Y H0s.(Wdg-y.u\u-ZOY@}MC}*cyf",I1?_X[ J7ٗAd|1! S*R3=Ybv"+n*{e||R-Ūy51u\k(KQ&l䆢򵛲=)='DlnS"w6mS ]yf6h6)IuLiLUSFєc6JO~GRy{fD* O3]zjn6ڄ-Aܹ 344keo/D_0#Y䅰Gn&Ha #(pÆzPfҬ?Y6RTܼ<µvC"7&;?RQvg@_eh(+- e,(۱ƴ,kFYPq&_nZ3mS'!cz5xƢ0T7 )UMn]\(zi5 5(Dy.›g;?~ -NTl+xԸPi4$iEO B/A|g vgA2j^yx'â ^rDIx\\EҶ> !%w(iD7QS8H5 9[>XcׅXٛ0mtpu6(4ry\RLUZu!:**:]9Ć4;ŗ,6U<oJQH&Ț*j5̒ZFEƔpIZGKOnWݖrzW e^T`4pC[" Q)5Z,R\89 v;*;8t_c`nj!M};Y&k5yYz*PkZJqPT+SNT= GY j>ǾQ]uBev:ŕR? G*B{' ARƷsD('B*SzFfJNΣ=r/&]!'Ƽ"yӐwyVc7t9ySZArtf-aC\83>n/Q͈gHn yfǶfKi*^~/k#Z>_u<[⋘`O3ESk_7 y=7NdR-vbxzJ >8Oωf_q㚇 >^tq~?|T .r?_O}4\s!9h>aTţfF(d2&*zY{G>! c=((=n{iЫ,TbCHa+5?l???] 
endstream endobj 143 0 obj <> stream xڝVn0 sHJ$@S[ۊ}C))|h2" pĿ0X˜!p 141z.ۏ393D ''34pYN-=/OˋEB=vc}7{ gfD({symcTt_rI2#j5*'3xC^W.ԓ5LtM1ЍZU +Oϡ/ ׷^fU؉Cт@┴ U,KCVNkLٻùݢ`͚䁧at)X9)JQx==-9xP~G0OG5*Yf-ֆQ)Dz944d*r,xV-j4uGe'ELHYWM^큨IZI.8_tl^t(g:uRD@#A@@P s'nTAqEgk$%ap eI,DmA/ћ> stream x\ْGv}ǁ#\3'(kz!*; ]M p,ɬ%@\=wɛ+(4B(! I).Yu Y(REg][IMW -A$U2Z`~ @VFc*Akɢ6 &u Vc@a {<&$*I:ZX$$D9+D3@ߑ@ubP a+akdQQx2PJuݒN@PP#A@WG}@4A<*@ / 겖k:du,A'G6l-%JPf‰Xz  ಓhb.@N¸QA4H,Ld(I,`- Dc鈻CH#X&Tҏ <ljEE IBTtL2mIQ08$cSD7NIX4kA*pP2R0Ik)R5C4.)iQT3EYb E tG*VQ6IO_$=؎ ͑nH*V ,)_89Ij<$0H"OI yV5yId*оD)_l<"eΥEy1b;zbdlL)C?HNjMn5Q2ЇڙvTrzhC+v"7MaAA1"׈ ˜%7uͧQAn'ulGS )*NNmxvGAg<9Rr]_ȹ&j8lG^#T1)S"5)8)}lBt/)`N1 s<іfҊFw op?l`0_LrN>eb闫m1NBF2NY~bT?7bm}x1oo*F4|1MjY.T^åŪїx_Y :j!l$.*0W{^-`_c +BNpd ߬Wf櫯?6n=>4nz5}l|b"mUc Y\R:Wa-ui52[֕gު*߾5X!( +:Ƙ*.濛r@eP6u#zЄ QhL^n%bMe6,w@m29  $.RX6p|Y=s,6 ( RVj׀GV }Իa@-ߍ%69,y cJ1) ݱh_ =TaxF#KjDy4o \;:LGWbEUt=! Qc`@g'y#5+`gh*wxG?6->fw_bC0q19,cAZr &jK-Ħq@'q@x_bal_,_.7>Y7y1vX>nDt<}svqֳ_lmzpw:.ol^-?A|,li6w5ϛ/qa~|=ne|FzySJF=FberdM64|끏.p3+({GO[5T*wa9q>i>ue$sܡ1y># )!UΝ8 lf~c"tڱ7sW]_tѰŖOF?E>:5RʣF=X7 _> 1$+a1}|uX+lQ]'Ա⫛ /uG{@t>Az >5)'O&t^/ZvNʣw)bT AXT\Ph9#C.~|&9/^aE&`,=~~>/y"Ʊ/.q(.0cwsһkG=ÄLzNouT.?/n7727ۼ3Gh <*(.D5sϝ2"=cMm&|yC{ s )bɰ8&w(}? 
Ǥ`Va/,Tg.t`]ū;*n0|F B!y'֣||@/0a,R0H4CzҨ*Pw,Q$['|r=ŴL3=|ONDϺ iY?csVi̦[>}v ߇XD{CQACk]^]فPw%=XOڝP<x>^fy"Z40U}X e#&`vq)}*sX };,a?sK?H_U97f?t]h҇ěTgڦhz1~|ٝ8k5^t8QMeO7U0((?k<"2Uد̣p.<Ћ}wun~:Ogh};G &`o!@*ci3Tdw{};ݝ1;m8~jpNMiJ81tͧm]!Ď]xx&XhceT+SWmdY5.ay ֵX<؍ ϣfK 1V;eu*obFHY& ^ x!P9x@IXy7^e(fb±z^Yh\|>Ot4mn;RnX mLk'WfI<\l/ֲޮMkԵ7:ǴWrNQl,qmqe@VfVim`;6cc~Yv@|n~I0,< *6M-^ݶ-vQwIǶ`g>"ɔ2L+[n S>)Af K1i9͸ӏi3OHKǗY>wGd㲹R0"[;eg̿8F_b1Ѧ9?M[wAݵcnlwwYN#,I_Oƨ`̊5u X'np~͓@qвπ"I3>s\}%&Ό9#TjPӅRsq|f]|=MVi%@' ('/ ,@8r1 P];(o:VqGrydF߽zlXz8}z02aK]e̙<kOdw^$~h6و|r>4=Q]nlѣȵ3ɩ&U(R= "QdYJD^ RyQ!-"E֗v9D&"I=^d?V+ݕ;_O~P<|x\g g3 /_6._ '`7$_+˹iP틞usaˣvh['QB$7&۽MV*qO[B]_b endstream endobj 268 0 obj <> stream x}Qo0}tY TĐ jƃ20+Hk>&2UMWݖnTԞu lF9\)YqM=G`n]O,O6~m&}~选ev9pLUݲa5eͩ6v6+Ѝڳ60GP=(v m(Aj3WksE.?TVmd,G~( .GMyyהe+/$v% Vi^o2%k<`|L-wCd#@SSDBD9A$b )WDdϔ#-HsFD=өK*}"A$"S")C"21wlCB=p$6 Pv o; endstream endobj 269 0 obj <> stream x}Mk@=I1*Ϳش! *|;fcU|M@Js`8VUOZ ^vvڇOIM _,Z5bK#`^Z`rx\X <1mV+ud ܶ'A)̛6AgLJ3/o*nΗu(Lc5Ds>>ҜH9D6QH NI$QBdn!~T^ba|E])έKABGrH!I ۆ6| j/97k94GR99rM!J ,59.x՝Y(aJmӎY|_ endstream endobj 270 0 obj <> stream x]]k0$ZmAζź2-c'(ѲOqj@s<okG&e샩U+JhQNp.%$KՍoUeeGY*`G$?ү:nw٥Tcˡ#ǛvPźYX}376[ ؛f(Hrm T;ƭ0v:L xBZ%U ^ٜ^"^VDIH'Z9D g,uv0S+hIp Fc *qꅠs\o>XK'|QCAR_ Ɲ%]Z_R'/wi -ïLw\Pj N endstream endobj 271 0 obj <> stream x]j@}L)A]ҀF6meՋ}uI *8{Q"U(ݨ bWa؜M1,EKÌ[b~_FNt-W9}VR] qvjSQu, 6Ll++̱/UjDV(ӑlF-YFj9^\dִB$ mתɕNwt<\ڃ8]49< m@ࡂk9!aG %8$xQ P\AG>j[|#bTj>=Uz0;l=Ovrޥ_+ endstream endobj 272 0 obj <> stream xڍk0WcCؒEu4)յq'ߥQ@ϒ|w>Ys6gߖ7]T;tf8r8c(̟6f>۟ދekYD*ۏ~|wX7,3B~Z}rV6r}݋ހٜ\3:[`9m]Qhn"K?kl^w[h? W#@Z ɑ@b$I#H<Y$q?k)AJT!W )FXp^ pɌH9W}䳆2f0c䂞G]gX2!=!sMIԄLCהS xh!Ѩ`E0ML&&Gc_WP5xjܔ@WPEW`X0 M[EhBj*FIј` 5¬HIQh:QeA3&}$~饒h3AsYRFD4y0, h3hKeaF4IOSc4dc;ac/vbV̧˜^FM;Û "&^FN:ÛTPqT&.I.|<.Py{&ix0ԍh]ۍl=y endstream endobj 274 0 obj <> stream xڝzwXT 9ۂr={@1c޻""H:R,6"C.5DMb`C$hnu̞{ɛ}y<3۽T$]x:: k=C\ 43^E Vf"a0L2Lrߋ =3fDbɐ>af#'2bqs9Nh.7Wnۉ6FV U!V.*7KUH%7jbU0 q78 -{G[<8? 
"+fM?_C愆x[yO u_Iҟ4zg޻v($"S#bEHCSK[d&#EhH&Y"hؘDl*fĬ{{{}Rq_q?qbK@ \<\D^}&hFhAhh!Z6$RnP:@{^HlD^ބ~: 8NdԔ5jn)a=CG}Ϟ=z^vlYb>}K}Q}{0w3=0bAY/YBc,OlPѠrybb"CS 3xՃ5nHސ|\l5jW yD̳NmKlo2S @~SȄ=6|bLn;ji[ByM@yIn^4ȫ]5'ăX[} Y +) yy4b)|MhN-{U_W/o>nLцU{VJ(գ/T M+K 4܎ёkJ>#j=,Fi^Yu-٨ԛà'ik.~. bۣSx/:K/6}/iy'e,WD0}=Bm,88=#C#?ne%E . m[gZ }BVOeO_J=wSid`y,.aV] ‚0:bYdfulbddG0w#I_"ADF+tgW9?`K )dJqDrJ^ iq1xΏ: Sg5(Ҧ+`#J+,LշF"OTr"K `|Y7;"|ރ@='Ś|iy>,Gj$njt"c#gcGKOiĭ^>9_a^aq4\ QVOz~rO23x|ԭnK,8ќi<66HkkQ`iLV0}=W4_t+'z&"ܱC\AO}P= dP~SܝИAuDHV!Ać/xPL/6Xo6aX d%mKQbGw=Q 3Н=Ӧ&=$izݕШH=NnW;鱇Td =V ,"(t=] W?t[pOF'taXļ#$j%-9U>4aPٓ ғ|ˊ S89c22I+I1FܥV[-V'A 'vV,,@w ; ճ0WoDMcM.7@uB 6u.&e(q &S{T=I/&&`vλ^Q]+ʁ#z2'栏XOeT^^J}?eйn$ KWŶ6+ ׈gKߊo#jqU\mY2clObȼv_/6I=[BrY*kwH4+NHLĜ\$nI8V&[+%:[C f~[N\`܃7ezSu[q5Z9Z WajP$ƕK+rNRZ%:?fK~]1q9򒼜SV}(#p3 <Pf/ C鋴J<l͛Qܵ7c,*tuqJş,^ɠ3wώԨ#0ʮyj.zRdeE9ǔğ8.awV 삢CL[5+$LǩXiW5'L !Gj7H;Q{qER"]QI1 LP&uc#\6; Div+G\2N2WUZb uт=lQIK$>A)~ !uC D%H^)`q\Vwx ga4cP_\O1/Z֧4XWu)%dqv! /mqÈdޞ>w=.R׸f+\Tm~s0y1O(-'#a6pŔ_xX YqN,yeZ^ZJDi;~,KIϸvܕal6%[ЉKaQV@FLIlnyrGp +;$hg dq9/r8P` Fۨ# (c!Xx9(_C]*lܷ˙'(_wWȦ"S1H\ l9g]W{lUHĵS= k27~^CZ˷pq󜦘:* ލk%b9a:v>[d |!{IékCI;mh Ջ_vJ9w0\i ڈ\>C s wL{bK*݈k)өt|26o k\RiRS={(LGIJ>)+bJdKO-&閸&A\tʎw0LyLv,Itـ$K%LÀSWn}1]?rvKIο7 Z}H61{j@GdhfxU {6Af?Y`iV9u),B& P[/Ɲ7ģq;T?"Ȑj+ ǠEԝQ|Ӻ"aP@Я)NIw*)8Eټq?0:o:e7fL\=R؊؝hA6ڷ2~uV9|⯌ڢ-^[+4s.iهg]\2w>ػFQZP^t+ĥ\d-_\AI&v1{A|TM߾=jƂiF]ƣAOόdӴJ=MI? }ţ;u?|8Fyᒣ(b} -1ޜLWUǂ ,Wpy_ 0X Fm^A60,IٝǑ#441' 4O6+ƄdaMar ]b䪕乭ѪRoE:Fw}t3/ aviӎ&Yp`2rav   26n_f7|E Kj L`Ÿn66͇Gl߲H(9a }V;vÚ̯|`֦fpB46Dd3!yS, obƨND!T2 p"r 1^Ҁ=\̪{mifefVfVifnYeYf\ҙ5—mAKU洐9PW^e=2cYEiVXd N γ}M (u$PAٌCQc qIS]^W uV*.Y茙k$,~B>+nw f0[hw΍v ڐ~1 =# 5v#4f_U~Pcvfl~SG$6Ăq9=Gk#=,5 ջyh0Ӎ4 bM#Lrύ2}ozk⡷@Л0rfjS&,@ssѨ<"32*)=($ d tg t.n٬5[idֱDFFJެS%pȨANrpo^5m8ܽ>v 2iˉR#pI^ŹBؾ6R 2[D R*% ֈN M ̥&H߾|X:CbmNS@Lȇ/s/0! 
)l=o%cɍ0,ӧgGW~`U2hw%C|w &r2*X~ad߷g/t69nǶET{n~!nI`g b_*9A&T0W:j g JxRc~Β;.X=V(xǕ0lI ~٨!"h/`\̣޴e`eK ?,((Dͽ."@ endstream endobj 276 0 obj <> stream xڛ܀ V eIHv` d endstream endobj 278 0 obj <> stream xڍXiXSNs6*lsTE"*:T2Y* T!"(EZZzzjnC{I}wN++T*yKVR4}U"cB76(HD,6= qd5տMƄm`K$,;]6#J6$,$RLIxRMoRB(T Uj"5]]&N2R.PjA2(A$6%D'*aJ36Z&iT1MBdHPLձ!ᑱ*UB35 qۘQN4y$'AE%i" &.-,%V$ (D%8HHߤXH-VRFJtFj+IRN:D*#}F#rgh;E;PiIfQ7-,,tO,ߵ r:ɬbs .R^|X1`YAY\m{H&;coǮs_ٿAs?y%%5;ps$b'bԐv~:^*Jw$!\c"þ:eq av.Z"X + 60DÚȠ%ܰunN?^ \Py"rm0bI(gH"B26p", `wTe➋6z9X\:rptTo ag O]Ɠw0Kpܳ_ VbbR#[瞓d"Hhsh+` %9ixa9hjf&4e끇XEwv~*uz'ɤ&;;HWГ 4hJn@C$>&v?fOwU@mLLb9A#^0'~X Az Fޗٜ.ۯ>ҧ~(pqfyoX8<9S.f{m7 ,DފjDضC qS?3!ͥ/1݈K2FA8N,!4k4>g]]L5:XȻa u]LY ]9}hV 1T=yB<C5+;Wʿdj8AC(dog=o~d9}^xcΜ0{C)n:|` &6 noG4&}2z7WQPG+H9f^q:w/W&eNYXx#>7(JI2A/ (=~_;Hq7M^VPX$*` vNta{(δ\@ ܅v6B(fmJK.iˉX x|5U17apC|_^rU vJd˵kcxŬWr\_s'1ixm6 $> |R\XY~TfIj2GC)"3r˒E}М|:&r7r2.^¼=넾HW}^A|{sZ&08Y*+ii⿮X4XUTb8_MY`|UHX"~D5/Ə|SuTܔd"o*(mX1R+.>dh5[2rzXUz\>il2ቸ:[iQRK+JwX/T7'z$CzG+#_CdVai4 ^N#Qi#`(,amC޸݆ tnpp19i_ 2TO:._q߁e$-(z`}9Z G#pC+Z 7 %Zi֪pӬW!dKɖ0\-: ٝeO漘 GS`PV-rp90✇;bs~UɾƒX_[ĿSktҗM_LY5Db95% g#rJXӦr׭\>Ap>0 Tۿ7RD4b}eJCuy rGzfL IJ#T#}O>U$>:- =pQ~!zװd\4CnY\]! 
M[p(1O`E-Gk F֞EtZGsR#Ea$ڿK(آXq7{j-ͦUgyR{˻jkv,PLO/4S2zl&cp[WRY*߶Mf2?d0-,/O+ u⵵0>ZS?Vl<+y) fbA0d)KWZ%dC}Y=5)CF@RGKb#ZQts_A61V&Lygm^blA/ezIe^rÊch{XsX6RؑVH)1k`]EmK?ќɇؐ.q\ݗP.1l8In9c}7wvm0ꕛdy,[ɿcxK$Nނf.a݉2M<ؾ=sIKW@CjXGCꯓ%)*Dͷ{(*D$ȼ2t׊>FI 8̔s0CȜ()jěʖmI8e[vLJd ~!@h`$Q-122/F7G69d 0hib2f%ș&̧) nmA+l6Ȅ~P(6[MdWj+ӭ1_H‘Q*2U#N]h(,*>=/ع5C%3 8Nt]0[͘RV #h}6en, B*m W+"ĐT F354LihЕ#nV]ūSh6n9Sa>诀u(s xE.OYѪvS|M``:4*&G+Zŭ'Kxgyv'o>>Fy:Y -A*ÛVf.Ky@5iR1k}?F-d)|c z?`7V"} .+EQhjD!lJ)?kgPKqc,ee֟'6f/ٯ ^y AN~#4zs{9jzO"Y<`ƏM*z@uہ 9`!p-M`W0 O5kyXLY~`<cV|tb֮̽(cqbЋkMlfK5{Kq#_.1Mc\` 9,C.REXӢ~ KpM-g`/[!؅(qJKVGb.c6ɏ)zo3`.trP [YJCq*:"l3u^j27VE(.T^Ü(,ha.UX2I͡+=a9&)dcu7NXSUʹΞ^-WG+F>Z BR_<"KVG l^^o.=sfvG7(z[z"=m0Af^mFH$t:l<^63+XO'ca Z endstream endobj 280 0 obj <> stream xڛY`/_ۀKRahq_; endstream endobj 282 0 obj <> stream xeXy\WN w3fj7{ꊭZZp) K ddĀ"HWWĪUmj׺m][;h߷}c~3'<99L;;P(?_2J/Z=M7/BBA zr;9.Q ؀w+[KP0`b˕\_R8v<*:[H9{G9#}B##U+ݔdFmRGnYQ <_9P9,4UXLD'8~℞=BD;ըTjuX́MP7LCMp G$ %8$GT 8 ^ y{ EB;!RBAO(JpIxxx3@8R0`aQ/^GEKDDٕ(L{ǿOW}vHp8Ͻ_Y)OL9.w8V8ivRj|t(G=%cY?`d?Cl0:2{. VL_oUW+WaeU,kX6F^UԹ怙 <<\LPK˪{5aAVu+f|̹D Z+NY6e,,CON@i4^Vʴbٙ\[\+D*ȗ*hET nEJ-!1&<_.V^'A~gq<6CLGB 툛'S3t) E} ާ]9zk0uh yFvb+k4-`I*;^T ^n?8o_dm0/(`2NBзO  QW2K\Q,`0 , A*g0'[S7d%8lڄh85NOȽ_↢Eeb6~lc?VcL{BM6a'e==W o9{t}GQm&S"fb4?66gC񓥌Jq8<+ n٤\;iN2_4r7as+QWQuQWϾ 2܌,0{2g3tX'/SR67 m_!a4{`e[w\.brѕ>tBazw:W=;{~6UTZaI$c[d?s[]̺JϖqT.Q|%Sx7޼87cs2㱹Z*as(Pw-)ߣ''1y:s% ɧackpP5ץqqh@w wd#XvYJ_S,}MkXL$%(酽w3J2t`-A-5|]yyW[mb:k;[IjIqxF]9_|ͦ18mm1.k@aO|(o$I\&)n4L1>?rzAA= *n\]Kl>x.&"խG㣍S{rj711JZVܡk`Ivt[): D$`4Lxx+984Vġpj+ّ1FoKQ>QD[$S*ۤ,ˏe *i=}W4^`4񱗸A5۰U%G_:ꊐ_j1zߪM~V5e Iޥؙl8yf% u` &d4>fSa+jh.4lI63IfDM>=2:cjYgabzsYlsX K:^T]m - Ngy[ e’YRg m#lR%`IvblLZA,\n.90K $#26L^ wH=+ ]elћGOuǢDBg9Ts[ R_ ܧ` B>tVѓGx,Vb>]txYWrVKd` Ld=G酙zC'O#\}[=Чn"_) f;E$t D0ٕSG%ȎܱmMNCGܘ 7GNqBӉ:2F dLI{UxY:ǡ0R\ե bS4:yfR2۞ښ]AAm`hI~ Ii &wYe`aY]O՛wuoI-Rn=ݥk^/!-}dPRyRœj[7t:GLYJy}lZ,䩦׷A[nSP38ۑ]vmaf6ۅ9+oS Ew+<|MK gmKNyvP>G78ފqGE /X<8GbBۃR; N! X;,6 h^3iB'^F/0ᯫx0ҁ&au2טW!KEq_<VU}sb(dԐ)#;/?\M#v&%f@Ȋ%mK+*V.e`臦&c3[HFg*e֎FQ,j7! 
wa|t&0\jº)2}('w? ۛ_3mvv>TFjh5]8ynsƃ!VyeiQRjNg˨0Fyɿd53U  h%LU5 ޏWDPx*+RRa+ϭPC_ߵ^iE֠dyY5f &4*'B <'ED. JѾA-7P<ĪiUH 5J>O$;a6ԂnazWM|řsDf59E{ F!d"p.R\3ff/uBag1-/')&i:RArR $QTyTi.׷):VmtxE\7}R L M㞑ZcbeI˱=WVRAcAf+?Bou)0 X (Dl)JcjL3"璉?S>>r+ }qƂ-\!,Wx)J~Ҁ n$Vk !8-''-9v_j |Πi k&2="i!8ѥIxxJHe_e4K²5j:enY@-^?MX`PId#K_[@y^ [DPJYyrRD 2, wЋ˗?E%`TA xWIY9e@ F.4%5:EEK X\8m9xI.SQ_H endstream endobj 284 0 obj <> stream xڛw{_aNG  endstream endobj 286 0 obj <> stream xMX TTG~u?Z}.n DDwLdUpC "H#QD'$f hԀ" "Bh\I4[X?5ӧΫzwMƀ9֌;,qh7%^ a-8zn9[NFGlFR>NMߠeN2>F$!))Bۘ̾N%k#+cbbu QZMhMh\f/g9 n0g)!PΚT0Ns#(o;hNi8-.7M&qq9qΜ fs87n!yppŜ/-r˸ n% .[}ȅ8df wfc͖%]5M&;.{l>߼ȼQX*~[ɧʗȷ_)*'bX;h A[ zl!Xkk07gp0`+V7VCl{ӡˇ ] ї?xTytEҚSuuuGoJotE7g%zm0MRyWaS/8GlIٜ ڱ}~yj`:U]{>ٓV\:CJ۴3j}F^$a_X&_.8=Ћ|$݀3\LsX'oj I<}jO9e<MpP:K>;FBEx[]͙@վ= pȴI?WlX/֞"K}K~#z-EiG \E%վ 01Jt3*!닧8T#t2⡌0M1J<*+z 3P/tXXZy̹->wiL{odJ'ʕ 2<0 Yݧb ]'y Ċj%igKkH+3ѝ8ta\\SS[ZVs6DPpLjry KdscF LL576IIv>Utq'U-v ?/t ruv p\s瞆-L.-+,*=+(y,'#_A#e8y^d,APB;k(f{]#6 7C' @hB"]qOQ]/Un:Def 7ch-S.q1rű>H++DP]]'#[EկW[|:VH9}%#Er[i ] xyaDtNell¥]c !y ?{8$b6A BDAגAq}}Fm*0Fnq(LAe@_ޗa7Y)yʔE_+Ƚ2(:.P/DZ"u4*s#l4#M8&kb0vF'_l;~򙄅Vph00h޻.WI$K{~UP5?v#^TF\"0Mx܍z7N/o>"&uT߾SS ~c] gV ,wozȰDeT:;n}Qwhſj>/pW;=<#L^e"<^^ *Wn3cW-q]p53e,ŠcVV$ M[oCXIUEB)]Vza$>8fJ* 5ݎk2RF>#K$ȅdZ0@e(1irYp_0ӽnf?g!DwutC5}vIDQfJJَm6?=z^R bz7h։945VA!'Kh#pfiNhl5aKyիôUr܉]i7ȕOoL 4Cht( Mİz!69T7Q¡#0v4לO+bųWOg~H * |$fy\c?lE*PneՊ0 KxVjn|JYol6rZ,XSgTOnS*WB'!L$F3`w'5 A9b݅]}D3rUСlN@^Q &YPp迎0g`u }c^בht!0fq.sZxSO_OZbjLn_]eX8̱WSw'r\N ³Fx)Nn"V=wdl'X@ПeJ#"%h-hzayWT#}xX(H8-\aYчb2;W@'x4ysoX^?#o$ƽ 0Ieo z0&Mw }& NXܳqyj%<#}z~'y #s`o.=fw@'FA#wǸGI,Md= P1[o{B1eK~F ѭ~%m2_/ѯBmf#6m+754@ZAH>p,j7i@Ŧu Sx1@SG:5W-uAJYdssl]=Y=;!ͧFjjE8_p₨J@^{,H^ękZq jj Źߕtwޱi(u":tl]/$Em ud:+~_PAF0tf\[=?s8jF{)'BCiQ$Z!n"^@h]:56M|ub^fC>c\@ ^DՋ1X+:XT\"hU05_@-tEW5nrm] E/ι2Vzi'ش@ٟl/\^WWx jwTlm]tDyQ k5me5LIlFVX1}ÒMa9L(2ڠ&\D3{%FGV>,L [πL,6uFjT'׬x ׋ϋwe4T$G/qME6U))egRc #M9|pYKȖ}$iQ7ق΋\1EA $_O2dh7'`Ni/tMVQQ%Mv(AWѣCī$EFJAN@'{$ʑ]ߌ N \[@mޥȱ{Ez4-2ArpuCGJO5(ćPr䢸HXLb117c9, la m!9894GNr\=+ҤAj 
A_[ZK˞=V xJg endstream endobj 287 0 obj <> stream x2{T?45x endstream endobj 289 0 obj <> stream xڍzwXT7g C89x5V0'T@Ι4H9g$"fWUT̺iݻ}{{¯A#m^icQ1q{cF0p#"@G+  #, ĝzeX舜_ݡ&榫 F^Z(}TuS6miS`,)~.S1B-wpX'' W-a"ѧ__S?Og2?wO?W "#q< jέ@ʉy  ?#Cւ' dmZ2%ersv"8½+`(է7gaPkkIe-'f3(hE-}!pS 2*ޮ2-Df˴MNHqP]W a,.0pY=#A3fӱ\*g<"{2찅|'a:h%ñM]Pg[8o; v>=:sYU ͪ$ :xሟoF^Δ"ϻ+6ÕF?4+?}5cGx<<U0#US̡LϜih+,^QwO7Lo\TxM`#W0nr>QB ߷7ݔpqԷkw~U>Z/y#SCfǙGWRkqN $q&ָ.V{u8A[s}rĺ=0a*6:s;+BYQEpbaz{d}2<*80aX-ىI('>E9 wY}uTF7Dpo437p.,8wP g"Af!]a>Ux7V$1?KD]o]$5` B :^ L+ SNAd\25d5w x(\y)k z`&, ^ f@\!eL4$Ḵ|Q/{?r\m1Žp #F .!p>d0`@Teu E@OL6h^K/B>`G$>!p^d0`Ncog)#,#3 9R2 =dʕ2Y& 0N@ -XsA kpkfL` )WNd_Q7/`_|-@&L\-Y @.Ub7/j9hz4dO 0!pd0a#΀>J_nƙ LJeEI+5_ɯj|vGcT&ҋu_XxaRm|r(fխ;9]uK{z*2V<(r t6JӚLpv72T{ukB JFskyIl;5sK1NnjʣLcBL;^gt'0[I$ޮQd06^;o_}'J>a*W񻶣]m\H. f#e&Nd|l~Vo g9_kj͇EB vhGQ$PfQ^LpTlh\=(3" ? k݁(>]5O؄~eY$⿍) 7x ;Zwq_nV;_&n]~?Eʂs"8MSa]U5x$6 9Ϋ绎R#M^3CcϪIVc9/<g+w)2/8 Oi:H6%)VlX֣/o6r_rNvru+XQZSS^@L}@GJtG:xKՋͬTҵ­ u|[=@8S,RCaŅ$vQ o4 OiaYpzXq-M W(t ފujng˃3-3+1DĜԆ YW T~JHNk5FL<|#!;N/Rf|v;WczZh@>k"+by9 :~R? \1 /OV;rX 0ZZ xPt(4os!44Dܬ]!$Ɵhހ#S]"s$ g=!1M,Y?@BO@tuk+>җ3؊c-BD+ot[5?zx_I r& yVkˊ'<[dۼ;W \TBUk_N{P˵]My6mçI^ȗq|bz4']Ό j!!:{o̧0AݖjmlPXulFf~j}< :'J ct Lސ[`w Tg'x0Vbc۳Ϥ'Z1__WFs! PcV~l'&K S)▍Y-w7to,$t?&\g!& l6}ଐ ReU4ŪkJ2SԫIJU?AǴ+,yF@&U]U%;םɵjٺ$>G)wͰe'comlu>ٟIJ _?rv΂sǪʋ ȆF6#d>#v:(J_X)|ջ)X\tkQCCz(Vx8Ģj ?_ }dǦ|1$WG5ڬnSQIAaI=[28=6R?3YC,ea-pēY)0t8\l]+5%C3gAG8͘(O/-Qw(…K'i@85VOQZ ݁`6'vH}sƹlky)-gَ50;tMƆv{u\m<^E0қU\KRSd9 009c0!aC'g7JU)d:Xo .&Wk]7xbaQad<uCf$s{o}h }l `c+ɋ{;ŕO'=p8un8؝mwt;1[\9Lϟ`zOO,KOKOUė*jRy"⾜rvZy߽X)>?ap7ȻƑ: Q155'O|ܑʸ<6?˱>^='mg}1xHCQu67!' N?P|"YތI" o:~B5re[!"Y,,5kL9KR+U*w^c%QD%R򐏷DbA jU`T&QP AhuFmM*_ˆ (+jl@"ȹI,waNbio8K>o®zQ'8A,lB< gx ٧+> F] 50 股6 fw.:WcIiBzDᱸo0=*g0i*$s#L?,%}zOok fb$6%*T*H* ゴlj+Qy>VQ" ,Ty8ؖu v[s1?o¬:0=Ϯxׯ~?GHʞ;Ķ`bzp0"fŸk;QK[դF kcIZ~f.\=)yiQXpvnh(m!O>V6}5&`,S(rc5Ͻb}|<@Sxd?1C2P@ T7Uj {}N^! 
pc1i9 B %WZUxJ.ٺذجx2\AvmaQ ٗłphZ#4vɜipA<$Eཁ6WCQ;՛ld,~j/`o}O(~ e<,{661;HH qƻ#edJ$ "N9)K+Jc1넄U e!;%L_~RFRa5 '0hSm߂ÃKaj/"رe cN} F}> stream xk``j@RƬP< gxi#3*ah>&C=+n0㴢 EP L-PXet5 endstream endobj 257 0 obj <> stream xڵY[O#~ϯ跀"kR՗ P²Zس&XM|3clbg+ S]ׯYie9*N*J^Yƪd[&E*IetPwʇa4)c &l*r"%,% ʁBnlZUe*+DDZM.Vgs8ZzQBP(e=,$&#"ȱBYJZ |#;⻄r.4w0('(yU,q vOQFA12)ݛlG-{ٌJr@EY:X0=&4&:;Flw"/Vz :»o]6 *u l }Un%kB`*i7+bb e&&G'8uTV $\*O`8 QdLH|| BѐJom Y3 Hp!$QsNh@jK(Y q#-,#@`;\s K}8PX"ɂ`Pqa0WX+"8njpHd `124*"Λoaj^/PՋ/Oe,t?ŧӟ/N~zD6?Ngh:<͗z8 h|s2DQw!zz9ݣ{H\|wy=yGџ^ m!B9Je]ߴk^ Qq(3}BsnT.H ?.nS'|iN&e-ʚm-wsOk4/5xy6Qe3D8ihZx5"1~y}7on4a4[@{ZDZF i'5op6l6o75nyBCj_P74缕TWglnp5ofsY+`[̤W z{|<=OG7߱8?x'; 7\2lnak<56300ac4fc8fc67f9574d2101ad7d7c0>]/Root 1 0 R/Info 2 0 R/Size 293/W[1 3 2]/Filter/FlateDecode/Length 680>> stream x5SNa*zRZH#mʒ%ٲd)kֲd)dW(%&1f˜s3>ihpU ."^Rm0aq&¬肮*cF@$Ё=)z/`3l~[b F{B0x$lp|$#1 bvn$_$5I,B{URU{}nJjwoBgL.43)S%3:jL}zcJnҷw 4|k2M}R`:tTJM2[bI4S9a ? iR$#IT.dI kMRnI`!V"\Kqıw`qx܍qr<0 p"8 2Sp9"\ٸ W܈9z, 7܊p:n+p}xL> stream xɎ+fw@@l s3rH>`!E[4^ i"}MpHpLoOv'8@Pndp@eC<}zZ; Jׅ]t `oG46Y^F>?d=c_o Čj@GBѱ0.}OKWDfG:GhG ұO =֩d'GT8t Q'sa~Cc}LtƼ@>VI*L RtHVRTҲ>W k2O;"6)l]ƅJ];V1$Y7`Cu*&X\w4Au ;Un]d ˓r1|`.:2>q6іbGLwW|Ef^ TC"cacr]*ՠ͢y,cU Z;v1+3d1TעNtjc)0O4b??YTgХ]xg褉D['3x+/Y-eB(h $ j+Ee{DDCia2|T|tPʢ~Ͳv`.*{+k`:+|׫qO=}"9|1[j_=Uv;of w[;ӱfkF\5ND8c65}pZ|j?hJ9'IekmS5"b kgLNJ,DAj̮Y 'λt޸,xNi[D> #j( Ъ E聰0a VG,٤O) Q`/ -XIdٟI;0'=A6x@BU*9!~8E/??{i4d&Ǩ(?jS<+<~ydh ٷPȈI9˸GOe[HX09GcHl^3oܒAC~Si>x=-CglsθVv p/k4Z_ qȖDᮊqBrT 7TD8PM +}]?1Oj>k$/WO|AlV|Ij槅fB[,+ŷ^BB"UE[5^ǠtnP 7VW$J{(ѨU}|&kJy U?Q\am]z1~)Lkm8ɓm茇GDKJ &Hw2݋h(;45nUQRa?ËX`ZI6R\ܷS,vlA4x~𾦲Ոch=l\6gԙ aOfhZyQ&YVTC^?dcK{ r/V7s~g,%~ إrӪܬMqf]t%|4kbgu!0MfY|^Vw\Eg v;lz,e})[oI= EƝ;Q!=ds/Yc<8x]*&sVfۮfҍr$^Z\NF {0AM-̾N[:NXL{Bxnwq';v-;v`sm@PӝA6>CVޙr!GYHuӞ-;X'H$pD4șRV uZ=?8_7+56Hl<vT> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L 
L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 24 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1430>> stream xڵWMo%5ϯki%D8P -S՞dhDC^^m]O .|<ޅ)9Œt%}_77G)޼XSǐ‡C[ ᛣJ95c!7f?jA~sFᘤDg-IHQ: +J*Q.) WZI; TggQcEJ[85IZ/֜"Ё/Ez{6˹&h6'yHj'Fa}&0|,^ul䟡[X?gnxm81MK%wƶwyŅ%"1Rl@Ǎ O_R?)~A5?ɿL&n2b}TًuޘHg$0'V]?Uzn|^!;KBbzG*/Zd((؉Ƌbݪc]0UCA `rE kE cz(%䏰d롘?; #:'aD!nPbȟD?+EMϲ>ݰS#I\ojSg#9L!eB0^hFچچyZv'L}oyy|>XVgOP3(*C ?wǗ_EC虮hZ|prnlf'KXM]7}VQ\k6r7vܬ=][ytPG\>~^X6/=Fs W 7Y6l}odlÐDx+]¾Jt w  701={>1WaI6_bu»viכ.zSNo $7ScכNzST<._uzS-;7M7]o m W777ܽ`h|sNoEފzzCsz+ > mF`#jz=tzvz]tztzt^o{:^+3|NoF?}v][u6N?~89k 7*eat$]sB‡wǟ2b" endstream endobj 28 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 29 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1044>> stream xڥK5+EooR$ŒQ)D9ܓJ,9׮*G}xw B-1g,)ld? 
??oH17/~<,>~5qChG Vs,%<!+W2 [lž GLrpQX rY)W9$W1sqd2]'"^ 蔸i*7-KAi2qs䅜U8h4BVrtDx!/g,RT39/6w.A煍V"lr8/άӓ2(9oQfam~՜%ٝ,%V96ޤ,'σ,dXŲQ>IwQ~>7Q3,%IgvufEΗY|!2`SmRcl/f-gIG'7婰şw-ȪBtğ| DZ%<(m7 A6?omE~\[ךC}"GZ"?Җt?H,z?訧jDTv=5.U~"QvjV-Q.8֝X(꫓QV?m t h5L'pMc] 5Zؓ'{g[_dF`hpvZ͓ ,W;ygj+%bd6=# N( yr ;XINJ?P|؏0^ /N+W塹w.,q= q%w|Tn*g_zp_[y_xr'촷פcÑQo%?|߃W\>T~ד/M_c {_,|;竂vۭw_0[?;^ڶsoVΤIaE'| qKya#HT5~ڒC]L Q)9 endstream endobj 30 0 obj <> stream xXMo6 WV$E ȡh@oV^{d=mXJI>ő #W=>/|L]_CDH~ nB xNTG\ ]Jd%G ӬJe򼑧#p; ~ݯp8 ^9JvGV,K+ : vec6bSH^@NUʅaP|݅EZGe̼ kT㔀b yƫ*qI>W]g=)^vR;Y7 '<؟ʉK-fے}Lb٬VxCLl1F(xZԥiaSVBxpaH4JRH::Y=|yT ~g9 IA`H'+w&Gˀj/‘WCd1;'d<92\l%7 ݇">gG} uuyPp.&Ne&<`i*Ľp4*XNW3YD5q^s3zڤV mGF_' yVEZڇdsv-{U}|?a`A1waHÀȸvLH CՀ@ ^LbQ׌&zW^کc9]۶&ɆDFTuU*Ћb"m%;1.Eid},~OOwhkT$agw"gͭe*^\)%Bqy{*e&/ui/br2;B) %EKen KWQy130'N= }٘ZECǯj Fn HA,u⬚1 _hzKIZ0%Yhƙxγ.v{cYQHvolX}p:W: 1co/Zie "Q6&JxjZTuzIOK`x"-Ye%MD 0#Q1T,b(KV8rV8bfӖ&m[҇-z9l.ZѶlzmBPÿ4 endstream endobj 36 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 37 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1437>> stream xڵXM\5_#bk"@Rؕ8 N v jq X ?wwk#m>PrtāϜbIAS>z|ׯÛ#ŔR?|{H)qcH!-=7Gsh%cZxpu\tA2ж-/-m'h=t?'=fWxcg Fru|%puӫ ^\w6\'6l

    q;M8:No,M7]o > v -w797ڹ=|ڟ[)^oNoNoNoNoexV}zQV[^oNo{> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 42 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1118>> stream xڥMo^5Wx mH@*JP 9ox;3 .| ĜBqⷤXS-8ՑbJ)\^Rא[=G8Zch%YJj,E[-c͔gX A;=Cτ) = EAX$k[MO9:hdHI9R4--f<,*%xKa*^ G/d.^Ȯx!XQSḍ9oG⼝@V_+}!Tq N.6ưP ^)nlsrPv[Tmh`V> stream xڵWMo6 WV$Av 4|M/ =Q'lH {("G}qpo ?/?>O%fv'ė՗wB M3.r<@©KLLnWeU{b(12q۪a!?~E`Q5TD|zU,9fW֦ER)BIBېm)C)ҧIj'qɥ2x?6q)&Ϝ;B~F_c - UGґ˶P T?IÁPP'ɽ9$#YZlE&REFj<^Lږ#NYkuD˴P0SQ"w"JK/\ZH /k/{9y:JkR钻>H^@q#y OyB>Q²woB)mǡ$e>spnzmn)}.X}DܮT==BK{nPMW6# E2 (6Dzt(.> EŬnQT EQMQPTxTR'Xx%Z`ݮ[V rF]PTB\DWz[fM#I<#UzJOHbLHaX)thd/in4sLy8 e@gBin Śׇ@'+ (>Tz;It 1)wMFlP^ p_i#zf*3fzMj^'oݵH d3 9g|rݒ 4omZqs\/9k0c"]uQuujaa^֫n> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 50 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1326>> stream xڵ͎\5)oR$´J@hA #sNv5DF"tk_-9Žqr1}g >'-8~W|w8ė>~oɥw<j'}8J]UGX4|7(eVZ|]QjE龮(-4dӊҐҐYoQŇڎ"d=!Yۊ22222[dEA+6DmE6Du?"U9EbP|G/C1e!f! 
K4Dξ6FGAKhi M1pI,EŋÍ U*vFй K(vI&,a88dh)1kd]#N{@>G׊pעhZhE cȹ#P_%$'i4*9ʓ5Yx. [HZ j(%id,%;=籖N|-ŕ]95m|QmX/v1rNP~GnNs$5. =b08v3f4/-,2փ∋/v8\IJɝ6sӰHel#l3t8I˦Vbي^N3A7IsGDly؉e e~%rP,kx׭5t e1c'1kۇ'mX6p~ o?78p` uz~+z7 .<nwT.p7Rݵq㕺qkcg$d5slΔ`q⹿q9d.>6z^ ^ wvm?/Nw^Pvǎ4/N _q=`"^Om"d㑅Ӟ⹎ ߦlm_שoSn endstream endobj 54 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 55 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1443>> stream xڵ͎5^򿷉)HaQ)9eww5ն˧Unqoq{>| .vI?~u?߷~{}Bp_Ͽm&|}}JQypŗ|03]}[߳ru8)Xfo^ӎ'.>NxWnU9q#RcWMEpy_4>y}xAД\eMBƴ7/ 'u [QNӞ9?_OK3L23^ -qꃰsi('OA}H8T1闔!]':x7W^5JJXGS}8#j,F4ͯb`01&: 7|cvWzbeɕBQ?&L{~PhO!5p!U H {S3hƣ`{<ӴLcի6iO|LFоǣbcf@@OgC`1JK`8k]etBXœౌL=[ȝ[ӆ34 pZSvȢ;=|Xs[>|g9w; _ɘ|9͓QwB\xwzQq>Z%p6 G~n77Dyd//_^K}kQ&DUzy̞znQ+PdW p y 0sPa7rPe=B=P:LȖ5M Eヒ9>rz$jUKqz,+f2ʣC/C*](1#BdG3ʷlgu;F 2ʣJAxݭFyQY%> stream xXn#G +m߀Cl܌ V.1=S<Yg C*M[o?`ؓQGds;r?Z{x?ߙeAV=7+ZB>.l۝ڊJ&sZ-o+ٵgD [*F+ĺHv4(DYĸuL99 ?I]E9s ЮW"qsp>l}D/CO3#oRNIpr;Hgb4'3ҁġy5)3*+QMJ"Lɺ~n#yYf̆b{T3#J(w-< `G9~taa3 sF![`2->;2Ek\{#ߵإV} r @IvG#ıo*"BGHGQvNr țUI/0m͂zn^N`*Xwrޤ} 3vh0s?>in`Xg0mC.3F hWH|/epAXٕK: ?YvYȥ%Q]AŔa%!J;VRP´تeQ G6đ%%E +3.;ϓ&p乂iGG3DЫ\8('ٵp8Q{3c-e >n[R!']YNnr!~CJq;K[_>zӣ̋16bkl%io0)P32XIBK 愄|=ϯ_rtnas6#2oڪO'NyEyk7Ja Bnv o(E]{bXMZ,_>(LmBХRJW‡q!;P _7ǵͱ>uK´^> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D 
`'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 63 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1314>> stream xڵ͎\5)oR$ŒJ@h4FDF"k_V9Žq܇r1}g >'-8~W|w8ė>~o[uOGIGɾˢS]J5E_Z_*yV龮UZq҄|Z4DViz[Qo JVQEz,Bi^5ݷ@k*QU^ VAk ;mۆًl농ZW}C+Ťv>dL?PMsWltD.5h t4 =A5b) (  m+!6Wй]8TzƜBq(. OD Í~FBr,|SB |NHҠEXYhiZ9&,rQD ]G. k3ߟ"1PPrgXYa2+5%'" #Xa$hʪ“|cCWQx"ə45XaE߇%Wx֬̒`B 1`[CR" gƴʜ_5:m$8azS]gUEi4F31ž3^c^`W蕣 lj lB_nv腾6z]^賌^ Jz^ˌ^ˌ^`Ke=R^^`>V/F/ѫo4zo4z^l endstream endobj 67 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 68 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 1496>> stream xڵˎ5^D 0#@&D( 9lw׏%b?uq]^9q܇r1}o >'-~?~|߻?Kp?{s{!"Cb%GQɏރ#X ;稜KS.ep*W_xڕc#uM#O;CVrv =ɧܓrĨ\'W/ \}PQF´9~ 5-Uyio6֧}^t=nup u}:Z1c0W$ʝr1yparSV)Y!Ory?<'CȮ,sCϲxO~D{,_ķjDߠ'Y(€}4څ2+OL_atk!Lһ<iu(Dt5y}sJ;rA.K 23,ƴ!wγb" } L yׄ$!-}QZ*%&c!g_ ]XWcg{<汋AkTmNFYҶcqcgntih]Wua KE2"?F, ^X}07X[r\p.}m<4 Aޖ谑g`jO B}_w7'MG#Y?dϗ=(ƚEߟ5xonFa(;44B$ϓNm*evRe ԙԆzhlZ 몙lL듒IRFjUKqy,f7FPFJ7dGY2ʣr[ʳ^^2ʳXGKxݭmXAFy閌h%Fyk4ry%7dG33vRʣ{ʣ1a(.igm5:f!<ڧQ3$KFy^ %<5j!> stream xڽXMoF WAPl@oAs+zHx/ {Iv$[J-25!G*!!#?go+̧/P>$6OgfHj%@r>r=8G|MuocsI,\p'iK7k~^Jm%lßO;1lvwlmмTb˪lIS(J*6e8;`y /\b*` nG|\#I2JbFqjb(~dc&$8usS;7%9 >J11CG֞P}˓n+$oxNGѼv<2ǖyWp{ d?;3DA"E#Z!;?]`rN r{%*#SNfM尿>ۄozǀ%RKd lvFHK8 
Bva޻ts`fIGێoȞ/p̢<5JQ#B{M8Vi!q)̴T=%q/} 6%֣N( "KL@,6x.)_' EaIm Qu~U(dxEj"8Q w)C 8T3(΢3͸I{U4jt\dcil:.7k%^Т_jIuHiKۇ4Df$>NrjR-kEP3M> stream x]ak@+cR]U! G{if_5 *:3묟YnA?]W4CkGj{:SZ4m=T>WKu"Yr_+=Oʈ/= =s@:^{BcZ{UmnOߦ!ڣ{M ^).}F'^p9뮡s_*{$o-+JfE\GuQI-6V_R^3+D:EP ]ͭ^HnSA؂a/CL hBf`΋@1(eLd~5A-M#3~fssfyf-)8 endstream endobj 102 0 obj <> stream x]k0+cj" mԖ=tg҇$7&s#[Kڡ q=\qS Ӊ Z>Ʉ?x&rq]8/ !ypsԈABQ&+ޙ4C[չL(u ((Kw]컣EMĈAmQBQoQ(ؿܫm:tz:x=> stream x]ak0+cMZKQ-lfje뿟59U.FzR7Rp {84ң#ᛷқ/6/Ǣ?Uo?-m(rMC"vYa#V$I> stream x]n0y _ZU09D"ksjIW%H h_{L9 3?}ou['7]*rT񀒪.Y^kѽE7gÿ?_/ "t}߯SKk,mճ~J9wߺK]Tӓlp;pm]Q*]4gsmZk㩦zι[v<6tlcCb)8Z9Z!ŀG mLe(#,h5'f/(H1Z"; VI ͗%,ÝLGNF%Ig0NQL*r+ Hg=7N=7eHN`L:e;`pjjd \-E,Dl Q**-*4{8},eexvDagim ''Wݨе]U endstream endobj 105 0 obj <> stream x]ak0+c@N'duT ۾xvCLgr"<{w9+z j@pң!i73'kޚݰfPïY2_ƤY gfEu LvIS;Fb{nA y ]^HuR=HC/˜¨`:Id}Y7کIU)FzFʑJQSK4H]=ԺFje4PaXQJ$})$fMVLXoƎ.C~z۴[pԠl. endstream endobj 107 0 obj <> stream xڝYwT׶acQswkP,ea(҆ H:(e(v-ŊXk&ٟ9;޼u[o޿gXԭH,_ź:A s՚ A,E`3adռ?w$o a>~TL7fZyf>\],-S[? 񅻇:'(v)@+WUW*Hrn Za:Zyyllv2_9jG KM6 xyYZ[یrZ_jo5'8_h 6~jPO?4eg1[?uIE}E2\d! }"RދMU7KČ#qqOq/X*#+'9q\l!/1ak 4 ApdltP2چRP*J&f?1'f_}#'$ͪ[x3 v}w=3d!OJXFrp8yxT :y`@ '~%X'ܒm/Pr! mqaƋX^:ٱ”)rl?v1{}$G?'Xf0 sw\_M,1wT,פNzQr7j2!jDda>Z 7|3\w в'D axv{cqNϙ Vn\;g9Syl?uQWp? 
,ri х,inA"a"B͛u˔dğa,x]s `Prw"j}j$џשnΪŤ \N+=#٦E1!*IĖIO6Sj7 N-’DtfWnM?n^auA|?S7sui<1ſ(ո5]Qa1.]8 44ܡfzz5;t b b]q,ĵNeD ~ƂtB*FV& i^q?=ZfB%XQOHj4kSKEs.AӅh/1uRҊ Ҩ:̵|>n@N^4R/z/ _~^4~ĴЋ8_ѭ9W)*ܞm(iPUB]kT1>0BCw/ʉYw#N W* ` QHbL\x`p+ Mxߙ=A1I5SUyݏď6R-2iM2b!"⮂$+q%b]ՓŌiER0S^ f#c&wqpUE&%`gB3GV} K0$7{&~!B_XOky+;+[I3"΂z䆼37b-#I[P Jps$;ﴒvēok"0#DW1يHN.YH~9!zS?7p#0ݸ9EX#|Zwx'](n&CE*m*f !}5JҬc Bgr&V!-t_-LLiR^('Tʂ*'{gр},|* fv0s0#j2~vd<~4&L=g Qz$.z*=o͎ Hc23 ]g+ )d .e;2d8`:O9V/z6nsM`/.Ֆ7;YN;#蠒k[tG21ʐ=Mt1"&k}# ss*g9oȻ NC/0+WI]ynG,ui,S:v|ʵàwr"fW(5+-~wF{VP ,=x&Wo Xuʗl>%/۱%XJz:q=0FWm.QoÏ| ܐ[qvՑqdؔĆ[cG/ci` ǐ2aX⬦W}}.K\+5qktOC,yFeDd4\=M)/-aW O,Po% A`?;  $7*`5QNJ騘[\Ce5+x-~^SS^0\c*q="V㦑iD0} +_׷^S^^4g-Vzj0į%hQ>ͺ69GH:D7SCʊ:ܛݲ{Bۈ F}0wwgƼ --ORzcՑxz̜D<<#U9xґK*\h65[9yyz)|kj||V%% ));ߦ=ebZsc٢Kxꘪg:j_]3uz,5: />~\pkG AA+pZG>g'z>,n-]Z{Cj9Zנl\891ITBRZ.lc`GSaQ'B,FR3 9IxRJ_S pF\+ }D|g z.3Myb!ŸQTÎ=1.|hdvtQٺUSKLVo1,4œ}u߂mOf+̮ؐ1ؘd5C@r qX J#SĨ$L? ,%f4(0Pu+ߙVWt?> K K4Z"2('-o_673'a2CM^ߟ}+ο! W74)7[c-OXDǗ"Mz;8azj>=i NbmȒ~jiebxRVG/B+вӘQYyKq塚Il@ 7ŘM0+&QO;wY8nbD|i nЦ֔K]1\u}=tagQd2Ѽ$j] Tf=l9J1´e>%tCL`W/ygȌų0FMzJh2L+Qn[n9}@LvqKܿh|\'{6衜hhFhe ;&^ ~`(i+q2y;ײQeW;}֥<鏠-E h_;,Y4 XzSۘD;#kO4r$='~2>n2+o/j8wSi"^>rK};uDz}6&(_A)%_kf^VjgH3RlvhE̕@(j[\dT+skBUs֙q),Cӕ/M I1x 52] a^> ЋciӧOr*WEO_T¤RcY哻/Pg}O>໯AѲmN=0Q[C6K2W-v23;:)B&X)sN,~Fo'rmT{>[f-rrRw:E." o/O6үc.y|$ޗdOMa=.I9砿tx5Yn8:cc D\XEj^Kt0-uʵ%kܴd!iPq$"8) o("^2їnOt[?s0Р Ԉk-J ONIv$$d9m'A `HVf7Kd闕bٛ`– !=!&atFNq !tkb$ŘNt(F٩$$YC-ً3UȒUL<5̮H-]]b7rN-sBlLua K` }BegD~? w7T0J eJxbELx9VSsMMY1d4ĜX3azU 蝙{ qz_KKWls^i$ܗ}B fX.yqg1X CN^#m&-aNuI.>Y-!hi:4yY-<cNqT5DIF|J%PBu/AEZ~GRrD7E)Ypv;\mˏ8BVo7KQDI)gQ,4qX݅}{"q4{8Mu ,g%|*ȨAQ!J.guw~7UwC/5d,CFz,6=zVFw Ĵ}x[*n<;|2牞|IsBʩW/¡,LJ+mG/W/6ݬ (V? UaN} ?H>`parDvNg"p9,\ Z&؛Kfwa<})Y XY6!e2bȊ``@2(Х/}i]g}Fv{&nJ`kLe m&*e{2N+jT}IFj41H$ 5R%y+]Hi%,$fF>jp^_qP endstream endobj 109 0 obj <> stream xڛm܀0K8a  endstream endobj 111 0 obj <> stream x=TmLSgrWPnt{/fs`U&2? 
nҭ) e셩I 0`ꏉ`0v*8_DQcbL^v,9y<9'y2,LٲjaOIk$,J5Q3D`CBddo}1 lY%|՛˔1 [X]\ de!]o0یMTDfz͸ϠZVYZ{ mb.L#WI!&\.4}FI+ZJRbfm0v[FV̊H#z}AH2)Lla2rA>gea'$:O^ x<_u>;Ւ5\"'xOE0$aGxbx Zb9U 5M@o;lŽHme)ћO&}S_TRT.,?+:κgjFFi]Y56Զ?zp'FY>XQR&lh}9jDԏN9HIMy\d5^~B@ Wܚ]4͊)\H7/!Ht±;2H*k"{D_b$E4G} B=G.t#%.ߤ8`9Tqz!zL$Z87f.AޠȚ{;LK0 ıXi MH8`-3Ѷ>kET!%Q?ˉB\υNµ=\S!࣍-Mmvb7~O`+UWw։$ozuzu}ߤ0;n:N&Pi@(~8uP!w֕N3C9 "?x> stream xk`Q0 endstream endobj 114 0 obj <> stream xUW \W Q3ی΋-RX[ ȕpDDEBDnC @x֫jjZZkF[m/yo^fXƆaYVoSqQb#,A;AʎXCEGvv6{v$xc f6eʱJYF313*f]+Bү<~Ej.^ PR$J MIV4*ZL+WՖOb؞“UGFūIN$y.aeRTV2~ {7IMU3V赺dVOHvkW9(oŠ0OX~N=՟6jf~-k ܊fmX)>kؾlVr4mDhlM%ZYammNItT+}7^z*Kew SOHh!~<~4 '7 d{EoxS)06T6zĬn&{sR+j*~ެu3e"4VY s>S]JV"!&LZs $MrpJhЄw0en˃0~Ȼrwt\ץO")-/7sϡ-_`.X7ZJq2f݂E){\+Qqeg9yBA ;Zc0a`\LGt6t#`j&Ä?t}]:w;)i] WRE[*-f^cyg\8Y} bϵ ŕx Yz\W{%ikVW);1$"`|>DqoxLAqrd!q-.:2]O)=G|)~&uPu}V}ېc8m{7:@AFk% ٝ[Ҹ=3X>k0=YKB´8HvZ(bPYe?V}EO $yC#Co`^ÏBIo5Mg# yMEƑ1&˲3G'aGb'7);,zx!x@-229ivu``DFQcFlKmbcbvD>|SD2k8H3wQKuLMz'&yHOeV F ej$+dd[w!Zd)&{ 3: (3vbO; еDJFHh@y5YZ;Z DBXzCj\A~/sU*j+y|MmyGv|zfO/OE (-7oM#"Nkۖl˛~u A`@fޥ;rtr4pVm7VHJkj/; N .O(CV.H{Aħ߅CRK>9-]ë7hl`I`2v Yc=GА/8,fHKV^VJ!kEna@?d endstream endobj 116 0 obj <> stream xk``W40 qT endstream endobj 118 0 obj <> stream xMXw\TGfٝ"\.܋FφK,(\VAIJ i(";&`nj~箳7叽s9gg^@`FY{mb|W֨-jkΑsҋX,FYrhK$O1Qd8trۘ}H4ڭ1[΋ZXxvR63?P40(RչMvujy"Rq{EUo آ8+ʃFj3Jd6,  ЪCN 8/L$ J:d\6u$nOՊPw+gVGOPD(r 6ӜJ:bKSH8xJ46 Чh V iaL)1Jw3i]{=gh+[4H{<󃱽T%?4bI]t<8JzPTVQψM1v&{nj%kӴF#Wo ZVQM9r+:kGjX1~ o "uk^y4>1~K=vLa]ެ'YҺaqh5?y7˄ gU\ƝNO֑eSDw L&F^++ 0,c /a:+x2KMf փ;> ofjHA|n}pJp);)עPan]ׁRYev5nMbB1D]~$װJl J\ 0=l֟rϱX]nZrUhZBm| x#y!kePs`'3)z: ռ>3><)'4p ]FKdiT7<΍a-"<5ٛ<&ϩ&{LL%1a 0$]k-X׻!IS&'R)˰ yIY!k1̂HPv&[E3sH',UGs fV^z<p}aa/F<ڟUrZg%bFR%m2RZI\2h[UX(v[I6`Ç@ȗ4(CC 5 x[<[#jc&oU2P藼ʼniI^,#&8Je֢aYGr+`!(x:( )Y(@0ȉY} XEu:` ¾Q#'=!IG#K8t6J+ʳ<AR$k _bʯc7rȈDԠ9ņƢb\*p|* >*L*=܋Z^хol+ ̝yRRX ' JypA ; %syf)ض}"XKHv,yf u_\tk,ٶ#v͏!};l\'c R f&%P<,A&԰IY,"xi{zg):aWdixYt11 %m,,ȬѳsKSM_|+}~{׏t3U.U(rkzC>xq[%;}j-MQ>p:ڇl7@/GD*&z`UըruUڕUZukxVY4|kMv. 
b/DMAJto^ufJ.CCo]V:etz᪃[5f“c>#QV-9iG kp ڑ=T% 6Ȼ`tɡhe  UG3ް5>#'pZ;4 cx=_M/(ͪ&W]ړXZP͋Ռx1VEќWKV]IQ-V )ԗ-ٔO1 ب3A2m|RDű۽B?cOw?<ҊS6UڮjLE3A*&M d@۩!/~ vN@Yo]}F|Z 2̌É`  tíw_wghB#)z[&E"dIz7&+P06Ѭqq8OݦG,];A"~wt6,6 p雸;17.[h$(b^?7 f>U&Iʚ3[~b=q%Hrpa"_-<[\ N%Nj)=;/ !R1i@x&" `$dH`.^!i7C8 2r @(^kIfŽQX4.-ecs~6/PЛn:Ln%ƹhez NR{V(sqq1wFͶ%4* ܠ[6hWN"̄%dѨL)/՗~;0;ycب={X1x[2g:@3/t2st2/=)!.FjgPkHuCyvQv O$o'O=V̝M`ՂrKX 3Q&uлs*"$S+)V7'Sۖb_R)0 e0K E&Ddty:fz78I$]|mdRY- [ ,;_M<% ѯ1IB1)䬩?7>K3Jx y#aE2<jɥ w{:wtd;~{b4<@ %ɒ2K[. }dIsdk 4en>#P͉cv?pC D!9]~to~}f|L\zG434Jt7{ƬfYu,]sLQ OŸ{Sf-8p"d٩I;]wPt`#d6mː>`M@|FnN:=sJ95^1CL/."{lG4HkL+2\yܢ؎DL wi͚V+ ]SVboal}95VһYt>}ӷz-m⤴3/,6F] *դߐ&?նӿ&F<+wܗ${^]a+|gpB-PRM w>H*gw%m0ZA+:\$>~,NڽL?d$*}Ǧ3F|Vwd#?[.;|sO=t Jۄڽ{tɃH8n"m7HaMv !k=Ũ C7/0kz416*UF8@STOKZ`)|:3y > stream xk``"NY3B I3SB0dD% h1`qHV8gT endstream endobj 122 0 obj <> stream xeVyXSW#1 4OR]AOA*e.@aYJ l#ŀR@؂jU"*_Ww͌_zgn3Sy9s 2R@QʪUUj)6ٔ6O'{ ^!9W[(#F㻓N',Pycm$2@xjزC.nfl|\̱p_Y!EfF_`TjL&q^VIcN\9 SwM@Z0N Llm%DxJLM$T4%џDK*0J:qX(vE#mP 8OE^_DÉD|SAi`*4co<1Dٝ]!x?oZh^<.yyzSjhnup/>˶uS8F/b6%K&lB6ďkR3UgAUьok,~9lE YFCPz*DC|m ,90`~p*$~OI0G ,ncC[7VvVSޅJ}3.Hʥ-gXA>Ouٮ60j w$it/aI%+i?xsqx[1îW:WsuuX01Ggvk' !!!bvB&"8NÄCh ,~t1zr, F,^l?_]g aK4>:ZJi,+^&e=%>ZE hgR} KLbݲs;hfVG);O{XHQXEzM.,yx pLP:A X{SY<OKpԇk>z-ׁ= }jK\IWWWt((Oa"ٍOuHU_`rt4sa͵E ^gԶXSIL W`%ffeVw-k + 9lfo@4\E5*y8d1#x! `'kgdoi[nBIP8ЕnC5[xdw{ a[vjqV֩^aeu{NH_Լy 'O΂J(P)La5֬sVb&N6|G|ղ-m }<6>;uoy 47zE2G7f\O6<Rx+hݓcdkX=kBl,Ws0& ݭE2G m߰.jKΪ,ST=R~%^_f>Q"?nus4$IUj*j<8PEmeWսn\<%1]mުvyOQZ,NHWl$%=Zd*Q/5W؝}}Cwd@Si79x8#4(uqV {-ԛ9&WrmnP!O \ SBt8סxLǎ֠IiIcW&qc}{ RxGj6,cTIOVbzPBPH7l!pӿ)R AUw˒lC#0<{<<>>Erd;0a%k2>t#i!tfcՙlKџ7o]Ǖ~~ڼVۖRb$xMq yqs3)ݹ2@3GOeu{Pj~y{SRǂkTp^CV:@ڱҝٸ{եi1їգyUZ( |<ͭ\g0 Q^[B2{CcqXWYʠ;otU=Nk5 )9?~B/_%[vy߼_!#! JQ\QjjZ2fCvNc>orНF!4NV{a3zIױyV(P  _κ_º4?&6NRGϟ?lz endstream endobj 124 0 obj <> stream xkjf <`"  endstream endobj 11 0 obj <> stream xZrF}W[Me˥*I֌%)09! 
4Hj-ʓRfs== 8IL$g)&%5ˬEaK#`CfbBq(eLy&LCiwITL[ӂiB1BIf@ hq(1r聊 Pf1c1 5Ќ{ qJLPXWa,K*Z V 8z p)1pj !ɵp7䠾@="S!EAP$=K.x2)b|grRLqy/ytSTU1"OqY$~^ˢ,_8*el%_vyh|md-Cmu)hcn?+&xW <9?ypc7dJqFٲz6ʦ(UIqULI?ʛ-# Σhu8-qXcCtOOiщzO/A[yy w߿_ 4n?'1?<{9JmG)A ۠ipvS1g7T(v8*-_f䴘a./V y,9}@u^R2P-S$͇Ē.UX--MjtvO9orv7A5yLӌUj! :WH;s¥.zӘUev߳]CtxV]`Z|Rovs֛C ~||ogǁ`jTu;Y:FK V>Gl5ھ,z5%ͬSDt@7\`]CzU٨ԁ~<6GԒ3p Ӳ'0ȆNxtծ'I5 U-3q XSZJ$FK/x1U] ]c`tshz)YpGSnJR\B]{iCV"U.*FAT=JQgZD8RFm0y<]H)`'P +0:3Nvt!XXҕ"a,9Tsl'Ã&fJsP p'9 pUQ+&NWShV :B#UjਂIA:Z4S v=-az&:I[6Ӛ[SqS?tJ"ujAf5Z4$h-<M0B!* i/0|WpշA5zn6I͙w篺/~B*&#ɲKM5И%MM,޲CCЈ@4q;i"Gu6<7h5!=ߗ n 9/6{I1<Iԭ"̷D!ް* +ЁVp3z3C}yyϛgߞ/zU׏[mڄoWXxXgkN) .N?2iNGu+:kk$kK/inFߟ58e @?H#7e0|o`v,}NeۡPmc)7YMMEA endstream endobj 125 0 obj <<374f60b83f64cfd788fa90c902432f28>]/Root 1 0 R/Info 2 0 R/Size 126/W[1 2 2]/Filter/FlateDecode/Length 328>> stream x-'K`׽{]uL61Y,&IpF-`lZEϟc<9b @'LL$ 88u$K .P =nH(֢K6!b Nwt$@<$J߾ܬI) M! 2dZ.eLlȂ[MryrɦYP ePzj(:"hFhC 4Zڠ$>= library(actuar) @ \begin{document} \maketitle Function \code{coverage} of \pkg{actuar} defines a new function to compute the probability density function (pdf) of cumulative distribution function (cdf) of any probability law under the following insurance coverage modifications: ordinary or franchise deductible, limit, coinsurance, inflation. In addition, the function can return the distribution of either the payment per loss or the payment per payment random variable. This terminology refers to whether or not the insurer knows that a loss occurred. For the exact definitions of the terms as used by \code{coverage}, see Chapter~5 of \cite{LossModels2e}. In the presence of a deductible, four random variables can be defined: \begin{enumerate} \item $Y^P$, the payment per payment with an ordinary deductible; \item $Y^L$, the payment per loss with an ordinary deductible; \item $\tilde{Y}^P$, the payment per payment with a franchise deductible; \item $\tilde{Y}^L$, the payment per loss with a franchise deductible. 
\end{enumerate} The most common case in insurance applications is the distribution of the amount paid per payment with an ordinary deductible, $Y^P$. Hence, it is the default in \code{coverage}. When there is no deductible, all four random variables are equivalent. This document presents the definitions of the above four random variables and their corresponding cdf and pdf for a deductible $d$, a limit $u$, a coinsurance level $\alpha$ and an inflation rate $r$. An illustrative plot of each cdf and pdf is also included. In these plots, a dot indicates a probability mass at the given point. In definitions below, $X$ is the nonnegative random variable of the losses with cdf $F_X(\cdot)$ and pdf $f_X(\cdot)$. \bibliography{actuar} <>= deductible <- 5 limit <- 13 @ \section{Payment per payment, ordinary deductible} <>= pgammaL <- coverage(cdf = pgamma, deductible = deductible, limit = limit, per.loss = TRUE) dgammaL <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, per.loss = TRUE) pgammaP <- coverage(cdf = pgamma, deductible = deductible, limit = limit) dgammaP <- coverage(dgamma, pgamma, deductible = deductible, limit = limit) d <- deductible u <- limit - d e <- 0.001 ylim <- c(0, dgammaL(0, 5, 0.6)) @ \begin{align*} Y^P &= \begin{cases} \alpha ((1 + r) X - d), & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha (u - d), & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{Y^P}(y) &= \begin{cases} 0, & y = 0 \\ \D\frac{F_X \left( \frac{y + \alpha d}{\alpha (1 + r)} \right) - F_X \left( \frac{d}{1 + r} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & 0 < y < \alpha (u - d) \\ 1, & y \geq \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaP(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaP(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \\ f_{Y^P}(y) &= 
\begin{cases} 0, & y = 0 \\ \left( \D\frac{1}{\alpha (1 + r)} \right) \D\frac{f_X \left( \frac{y + \alpha d}{\alpha(1 + r)} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & 0 < y < \alpha (u - d) \\ \D\frac{1 - F_X \Big( \frac{u}{1 + r} \Big)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & y = \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaP(x, 5, 0.6), from = 0 + e, to = u - e, xlim = c(0, limit), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) points(u, dgammaP(u, 5, 0.6), pch = 16) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \end{align*} \section{Payment per loss, ordinary deductible} \begin{align*} Y^L &= \begin{cases} 0, & X < \D \frac{d}{1 + r} \\ \alpha ((1 + r) X - d), & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha (u - d), & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{Y^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & y = 0 \\ F_X \left( \D\frac{y + \alpha d}{\alpha(1 + r)} \right), & 0 < y < \alpha (u - d) \\ 1, & y \geq \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaL(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaL(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \\ f_{Y^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & y = 0 \\ \D\frac{1}{\alpha (1 + r)} f_X \left( \D\frac{y + \alpha d}{\alpha(1 + r)} \right), & 0 < y < \alpha (u - d) \\ 1 - F_X \left( \D\frac{u}{1 + r} \right), & y = \alpha(u - d) \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = u - e, xlim = c(0, limit), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) points(c(0, u), dgammaL(c(0, u), 5, 0.6), pch = 16) axis(1, at = c(0, u), labels = c("0", "u - d")) @ \end{minipage} \end{align*} \section{Payment 
per payment, franchise deductible} <>= pgammaL <- coverage(cdf = pgamma, deductible = deductible, limit = limit, per.loss = TRUE, franchise = TRUE) dgammaL <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, per.loss = TRUE, franchise = TRUE) pgammaP <- coverage(cdf = pgamma, deductible = deductible, limit = limit, franchise = TRUE) dgammaP <- coverage(dgamma, pgamma, deductible = deductible, limit = limit, franchise = TRUE) d <- deductible u <- limit e <- 0.001 ylim <- c(0, dgammaL(0, 5, 0.6)) @ \begin{align*} \tilde{Y}^P &= \begin{cases} \alpha (1 + r) X, & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha u, & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{\tilde{Y}^P}(y) &= \begin{cases} 0, & 0 \leq y \leq \alpha d \\ \D\frac{F_X \left( \frac{y}{\alpha (1 + r)} \right) - F_X \left( \frac{d}{1 + r} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & \alpha d < y < \alpha u \\ 1, & y \geq \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaP(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit + d), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaP(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \\ f_{\tilde{Y}^P}(y) &= \begin{cases} 0, & 0 \leq y \leq \alpha d \\ \left( \D\frac{1}{\alpha (1 + r)} \right) \D\frac{f_X \left( \frac{y}{\alpha(1 + r)} \right)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & \alpha d < y < \alpha u \\ \D\frac{1 - F_X \Big( \frac{u}{1 + r} \Big)}{% 1 - F_X \left( \frac{d}{1 + r} \right)}, & y = \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaP(x, 5, 0.6), from = d + e, to = u - e, xlim = c(0, limit + d), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = d, add = TRUE, lwd = 2) points(u, dgammaP(u, 5, 0.6), pch = 16) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} 
\end{align*} \section{Payment per loss, franchise deductible} \begin{align*} \tilde{Y}^L &= \begin{cases} 0, & X < \D \frac{d}{1 + r} \\ \alpha (1 + r) X, & \D\frac{d}{1 + r} \leq X < \frac{u}{1 + r} \\ \alpha u, & \D X \geq \frac{u}{1 + r} \end{cases} & \\ F_{\tilde{Y}^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & 0 \leq y \leq \alpha d \\ F_X \left( \D\frac{y}{\alpha(1 + r)} \right), & \alpha d < y < \alpha u \\ 1, & y \geq \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(pgammaL(x, 5, 0.6), from = 0, to = u - e, xlim = c(0, limit + d), ylim = c(0, 1), xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(pgammaL(x, 5, 0.6), from = u, add = TRUE, lwd = 2) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \\ f_{\tilde{Y}^L}(y) &= \begin{cases} F_X \left( \D\frac{d}{1 + r} \right), & y = 0 \\ \D\frac{1}{\alpha (1 + r)} f_X \left( \D\frac{y}{\alpha(1 + r)} \right), & \alpha d < y < \alpha u \\ 1 - F_X \left( \D\frac{u}{1 + r} \right), & y = \alpha u \end{cases} & \begin{minipage}{0.4\linewidth} <>= par(mar = c(2, 3, 1, 1)) curve(dgammaL(x, 5, 0.6), from = d + e, to = u - e, xlim = c(0, limit + d), ylim = ylim, xlab = "", ylab = "", xaxt = "n", lwd = 2) curve(dgammaL(x, 5, 0.6), from = 0 + e, to = d, add = TRUE, lwd = 2) points(c(0, u), dgammaL(c(0, u), 5, 0.6), pch = 16) axis(1, at = c(0, d, u), labels = c("0", "d", "u")) @ \end{minipage} \end{align*} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/inst/doc/risk.Rnw0000644000176200001440000007576715147745722014757 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Risk and ruin theory} %\VignettePackage{actuar} %\SweaveUTF8 \title{Risk and ruin theory features of \pkg{actuar}} \author{Christophe Dutang \\ Université Paris Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} %% Additional math commands 
\newcommand{\VaR}{\mathrm{VaR}} \newcommand{\CTE}{\mathrm{CTE}} <>= library(actuar) options(width = 52, digits = 4) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} Risk theory refers to a body of techniques to model and measure the risk associated with a portfolio of insurance contracts. A first approach consists in modeling the distribution of total claims over a fixed period of time using the classical collective model of risk theory. A second input of interest to the actuary is the evolution of the surplus of the insurance company over many periods of time. In \emph{ruin theory}, the main quantity of interest is the probability that the surplus becomes negative, in which case technical ruin of the insurance company occurs. The interested reader can read more on these subjects in \cite{LossModels4e,Gerber_MRT,DenuitCharpentier1,MART:2e}, among others. The current version of \pkg{actuar} \citep{actuar} contains four visible functions related to the above problems: two for the calculation of the aggregate claim amount distribution and two for ruin probability calculations. \section{The collective risk model} \label{sec:collective-risk-model} Let random variable $S$ represent the aggregate claim amount (or total amount of claims) of a portfolio of independent risks over a fixed period of time, random variable $N$ represent the number of claims (or frequency) in the portfolio over that period, and random variable $C_j$ represent the amount of claim $j$ (or severity). Then, we have the random sum \begin{equation} \label{eq:definition-S} S = C_1 + \dots + C_N, \end{equation} where we assume that $C_1, C_2, \dots$ are mutually independent and identically distributed random variables each independent of $N$. 
The task at hand consists in evaluating numerically the cdf of $S$, given by \begin{align} \label{eq:cdf-S} F_S(x) &= \Pr[S \leq x] \notag \\ &= \sum_{n = 0}^\infty \Pr[S \leq x|N = n] p_n \notag \\ &= \sum_{n = 0}^\infty F_C^{*n}(x) p_n, \end{align} where $F_C(x) = \Pr[C \leq x]$ is the common cdf of $C_1, \dots, C_n$, $p_n = \Pr[N = n]$ and $F_C^{*n}(x) = \Pr[C_1 + \dots + C_n \leq x]$ is the $n$-fold convolution of $F_C(\cdot)$. If $C$ is discrete on $0, 1, 2, \dots$, one has \begin{equation} \label{eq:convolution-formula} F_C^{*k}(x) = \begin{cases} I\{x \geq 0\}, & k = 0 \\ F_C(x), & k = 1 \\ \sum_{y = 0}^x F_C^{*(k - 1)}(x - y) f_C(y), & k = 2, 3, \dots, \end{cases} \end{equation} where $I\{\mathcal{A}\} = 1$ if $\mathcal{A}$ is true and $I\{\mathcal{A}\} = 0$ otherwise. \section{Discretization of claim amount distributions} \label{sec:discretization} Some numerical techniques to compute the aggregate claim amount distribution (see \autoref{sec:aggregate}) require a discrete arithmetic claim amount distribution; that is, a distribution defined on $0, h, 2h, \dots$ for some step (or span, or lag) $h$. The package provides function \code{discretize} to discretize a continuous distribution. (The function can also be used to modify the support of an already discrete distribution, but this requires additional care.) Let $F(x)$ denote the cdf of the distribution to discretize on some interval $(a, b)$ and $f_x$ denote the probability mass at $x$ in the discretized distribution. Currently, \code{discretize} supports the following four discretization methods. \begin{enumerate} \item Upper discretization, or forward difference of $F(x)$: \begin{equation} \label{eq:discretization:upper} f_x = F(x + h) - F(x) \end{equation} for $x = a, a + h, \dots, b - h$. The discretized cdf is always above the true cdf. 
\item Lower discretization, or backward difference of $F(x)$: \begin{equation} \label{eq:discretization:lower} f_x = \begin{cases} F(a), & x = a \\ F(x) - F(x - h), & x = a + h, \dots, b. \end{cases} \end{equation} The discretized cdf is always under the true cdf. \item Rounding of the random variable, or the midpoint method: \begin{equation} \label{eq:discretization:midpoint} f_x = \begin{cases} F(a + h/2), & x = a \\ F(x + h/2) - F(x - h/2), & x = a + h, \dots, b - h. \end{cases} \end{equation} The true cdf passes exactly midway through the steps of the discretized cdf. \item Unbiased, or local matching of the first moment method: \begin{equation} \label{eq:discretization:unbiased} f_x = \begin{cases} \dfrac{\E{X \wedge a} - \E{X \wedge a + h}}{h} + 1 - F(a), & x = a \\ \dfrac{2 \E{X \wedge x} - \E{X \wedge x - h} - \E{X \wedge x + h}}{h}, & a < x < b \\ \dfrac{\E{X \wedge b} - \E{X \wedge b - h}}{h} - 1 + F(b), & x = b. \end{cases} \end{equation} The discretized and the true distributions have the same total probability and expected value on $(a, b)$. \end{enumerate} \autoref{fig:discretization-methods} illustrates the four methods. It should be noted that although very close in this example, the rounding and unbiased methods are not identical. 
\begin{figure}[t] \centering <>= fu <- discretize(plnorm(x), method = "upper", from = 0, to = 5) fl <- discretize(plnorm(x), method = "lower", from = 0, to = 5) fr <- discretize(plnorm(x), method = "rounding", from = 0, to = 5) fb <- discretize(plnorm(x), method = "unbiased", from = 0, to = 5, lev = levlnorm(x)) par(mfrow = c(2, 2), mar = c(5, 2, 4, 2)) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Upper", ylab = "F(x)") plot(stepfun(0:4, diffinv(fu)), pch = 20, add = TRUE) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Lower", ylab = "F(x)") plot(stepfun(0:5, diffinv(fl)), pch = 20, add = TRUE) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Rounding", ylab = "F(x)") plot(stepfun(0:4, diffinv(fr)), pch = 20, add = TRUE) curve(plnorm(x), from = 0, to = 5, lwd = 2, main = "Unbiased", ylab = "F(x)") plot(stepfun(0:5, diffinv(fb)), pch = 20, add = TRUE) ## curve(plnorm(x), from = 0, to = 5, lwd = 2, ylab = "F(x)") ## par(col = "blue") ## plot(stepfun(0:4, diffinv(fu)), pch = 19, add = TRUE) ## par(col = "red") ## plot(stepfun(0:5, diffinv(fl)), pch = 19, add = TRUE) ## par(col = "green") ## plot(stepfun(0:4, diffinv(fr)), pch = 19, add = TRUE) ## par(col = "magenta") ## plot(stepfun(0:5, diffinv(fb)), pch = 19, add = TRUE) ## legend(3, 0.3, legend = c("upper", "lower", "rounding", "unbiased"), ## col = c("blue", "red", "green", "magenta"), lty = 1, pch = 19, ## text.col = "black") @ \caption{Comparison of four discretization methods} \label{fig:discretization-methods} \end{figure} Usage of \code{discretize} is similar to R's plotting function \code{curve}. The cdf to discretize and, for the unbiased method only, the limited expected value function are passed to \code{discretize} as expressions in \code{x}. The other arguments are the upper and lower bounds of the discretization interval, the step $h$ and the discretization method. 
For example, upper and unbiased discretizations of a Gamma$(2, 1)$ distribution on $(0, 17)$ with a step of $0.5$ are achieved with, respectively, <>= fx <- discretize(pgamma(x, 2, 1), method = "upper", from = 0, to = 17, step = 0.5) fx <- discretize(pgamma(x, 2, 1), method = "unbiased", lev = levgamma(x, 2, 1), from = 0, to = 17, step = 0.5) @ Function \code{discretize} is written in a modular fashion making it simple to add other discretization methods if needed. \section{Calculation of the aggregate claim amount distribution} \label{sec:aggregate} Function \code{aggregateDist} serves as a unique front end for various methods to compute or approximate the cdf of the aggregate claim amount random variable $S$. Currently, five methods are supported. \begin{enumerate} \item Recursive calculation using the algorithm of \cite{Panjer_81}. This requires the severity distribution to be discrete arithmetic on $0, 1, 2, \dots, m$ for some monetary unit and the frequency distribution to be a member of either the $(a, b, 0)$ or $(a, b, 1)$ class of distributions \citep{LossModels4e}. (These classes contain the Poisson, binomial, negative binomial and logarithmic distributions and their zero-truncated and zero-modified extensions allowing for a zero or arbitrary mass at $x = 0$.) The general recursive formula is: \begin{displaymath} f_S(x) = \frac{(p_1 - (a + b)p_0)f_C(x) + \sum_{y=1}^{\min(x, m)}(a + by/x)f_C(y)f_S(x - y)}{1 - a f_C(0)}, \end{displaymath} with starting value $f_S(0) = P_N(f_C(0))$, where $P_N(\cdot)$ is the probability generating function of $N$. Probabilities are computed until their sum is arbitrarily close to 1. The recursions are done in C to dramatically increase speed. One difficulty the programmer is facing is the unknown length of the output. This was solved using a common, simple and fast technique: first allocate an arbitrary amount of memory and double this amount each time the allocated space gets full. 
\item Exact calculation by numerical convolutions using \eqref{eq:cdf-S} and \eqref{eq:convolution-formula}. This also requires a discrete severity distribution. However, there is no restriction on the shape of the frequency distribution. The package merely implements the sum \eqref{eq:cdf-S}, the convolutions being computed with R's function \code{convolve}, which in turn uses the Fast Fourier Transform. This approach is practical for small problems only, even on today's fast computers. \item Normal approximation of the cdf, that is \begin{equation} \label{eq:normal-approximation} F_S(x) \approx \Phi \left( \frac{x - \mu_S}{\sigma_S} \right), \end{equation} where $\mu_S = \E{S}$ and $\sigma_S^2 = \VAR{S}$. For most realistic models, this approximation is rather crude in the tails of the distribution. \item Normal Power II approximation of the cdf, that is \begin{equation} \label{eq:np2-approximation} F_S(x) \approx \Phi \left( -\frac{3}{\gamma_S} + \sqrt{\frac{9}{\gamma_S^2} + 1 + \frac{6}{\gamma_S} \frac{x - \mu_S}{\sigma_S}} \right), \end{equation} where $\gamma_S = \E{(S - \mu_S)^3}/\sigma_S^{3/2}$. The approximation is valid for $x > \mu_S$ only and performs reasonably well when $\gamma_S < 1$. See \cite{Daykin_et_al} for details. \item Simulation of a random sample from $S$ and approximation of $F_S(x)$ by the empirical cdf \begin{equation} F_n(x) = \frac{1}{n} \sum_{j = 1}^n I\{x_j \leq x\}. \end{equation} The simulation itself is done with function \code{simul} (see the \code{"simulation"} vignette). This function admits very general hierarchical models for both the frequency and the severity components. \end{enumerate} Here also, adding other methods to \code{aggregateDist} is simple due to its modular conception. The arguments of \code{aggregateDist} differ according to the chosen calculation method; see the help page for details. One interesting argument to note is \code{x.scale} to specify the monetary unit of the severity distribution. 
This way, one does not have to mentally do the conversion between the support of $0, 1, 2, \dots$ assumed by the recursive and convolution methods, and the true support of $S$. The recursive method fails when the expected number of claims is so large that $f_S(0)$ is numerically equal to zero. One solution proposed by \citet{LossModels4e} consists in dividing the appropriate parameter of the frequency distribution by $2^n$, with $n$ such that $f_S(0) > 0$ and the recursions can start. One then computes the aggregate claim amount distribution using the recursive method and then convolves the resulting distribution $n$ times with itself to obtain the final distribution. Function \code{aggregateDist} supports this procedure through its argument \code{convolve}. A common problem with the recursive method is failure to obtain a cumulative distribution function that reaching (close to) $1$. This is usually due to too coarse a discretization of the severity distribution. One should make sure to use a small enough discretization step and to discretize the severity distribution far in the right tail. The function \code{aggregateDist} returns an object of class \code{"aggregateDist"} inheriting from the \code{"function"} class. Thus, one can use the object as a function to compute the value of $F_S(x)$ in any $x$. For illustration purposes, consider the following model: the distribution of $S$ is a compound Poisson with parameter $\lambda = 10$ and severity distribution Gamma$(2, 1)$. 
To obtain an approximation of the cdf of $S$ we first discretize the gamma distribution on $(0, 22)$ with the unbiased method and a step of $0.5$, and then use the recursive method in \code{aggregateDist}: <>= fx <- discretize(pgamma(x, 2, 1), method = "unbiased", from = 0, to = 22, step = 0.5, lev = levgamma(x, 2, 1)) Fs <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 10, x.scale = 0.5) summary(Fs) @ Although useless here, the following is essentially equivalent, except in the far right tail for numerical reasons: <>= Fsc <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 5, convolve = 1, x.scale = 0.5) summary(Fsc) @ We return to object \code{Fs}. It contains an empirical cdf with support <>= knots(Fs) @ A nice graph of this function is obtained with a method of \code{plot} (see \autoref{fig:Fs}): <>= plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) @ \begin{figure}[t] \centering <>= plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60)) @ \caption{Graphic of the empirical cdf of $S$ obtained with the recursive method} \label{fig:Fs} \end{figure} The package defines a few summary methods to extract information from \code{"aggregateDist"} objects. First, there are methods of \code{mean} and \code{quantile} to easily compute the mean and obtain the quantiles of the approximate distribution: <>= mean(Fs) quantile(Fs) quantile(Fs, 0.999) @ Second, a method of \texttt{diff} gives easy access to the underlying probability mass function: <>= diff(Fs) @ Of course, this is defined (and makes sense) for the recursive, direct convolution and simulation methods only. Third, the package introduces the generic functions \code{VaR} and \code{CTE} (with alias \code{TVaR}) with methods for objects of class \code{"aggregateDist"}. 
The former computes the value-at-risk $\VaR_\alpha$ such that \begin{equation} \label{eq:VaR} \Pr[S \leq \VaR_\alpha] = \alpha, \end{equation} where $\alpha$ is the confidence level. Thus, the value-at-risk is nothing else than a quantile. As for the method of \code{CTE}, it computes the conditional tail expectation (also called Tail Value-at-Risk) \begin{equation} \label{eq:CTE} \CTE_\alpha = \E{S|S > \VaR_\alpha}. \end{equation} Here are examples using object \code{Fs} obtained above: <>= VaR(Fs) CTE(Fs) @ To conclude on the subject, \autoref{fig:Fs-comparison} shows the cdf of $S$ using five of the many combinations of discretization and calculation method supported by \pkg{actuar}. \begin{figure}[t] \centering <>= fx.u <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "upper") Fs.u <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.u, lambda = 10, x.scale = 0.5) fx.l <- discretize(pgamma(x, 2, 1), from = 0, to = 22, step = 0.5, method = "lower") Fs.l <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx.l, lambda = 10, x.scale = 0.5) Fs.n <- aggregateDist("normal", moments = c(20, 60)) Fs.s <- aggregateDist("simulation", model.freq = expression(y = rpois(10)), model.sev = expression(y = rgamma(2, 1)), nb.simul = 10000) par(col = "black") plot(Fs, do.points = FALSE, verticals = TRUE, xlim = c(0, 60), sub = "") par(col = "blue") plot(Fs.u, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "red") plot(Fs.l, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "green") plot(Fs.s, do.points = FALSE, verticals = TRUE, add = TRUE, sub = "") par(col = "magenta") plot(Fs.n, add = TRUE, sub = "") legend(30, 0.4, c("recursive + unbiased", "recursive + upper", "recursive + lower", "simulation", "normal approximation"), col = c("black", "blue", "red", "green", "magenta"), lty = 1, text.col = "black") @ \caption{Comparison between the empirical or approximate cdf of $S$ obtained with 
five different methods} \label{fig:Fs-comparison} \end{figure} \section{The continuous time ruin model} \label{sec:ruin-model} We now turn to the multi-period ruin problem. Let $U(t)$ denote the surplus of an insurance company at time $t$, $c(t)$ denote premiums collected through time $t$, and $S(t)$ denote aggregate claims paid through time $t$. If $u$ is the initial surplus at time $t = 0$, then a mathematically convenient definition of $U(t)$ is \begin{equation} \label{eq:definition-surplus} U(t) = u + c(t) - S(t). \end{equation} As mentioned previously, technical ruin of the insurance company occurs when the surplus becomes negative. Therefore, the definition of the infinite time probability of ruin is \begin{equation} \label{eq:definition-ruin} \psi(u) = \Pr[U(t) < 0 \text{ for some } t \geq 0]. \end{equation} We define some other quantities needed in the sequel. Let $N(t)$ denote the number of claims up to time $t \geq 0$ and $C_j$ denote the amount of claim $j$. Then the definition of $S(t)$ is analogous to \eqref{eq:definition-S}: \begin{equation} \label{eq:definition-S(t)} S(t) = C_1 + \dots + C_{N(t)}, \end{equation} assuming $N(0) = 0$ and $S(t) = 0$ as long as $N(t) = 0$. Furthermore, let $T_j$ denote the time when claim $j$ occurs, such that $T_1 < T_2 < T_3 < \dots$ Then the random variable of the interarrival (or wait) time between claim $j - 1$ and claim $j$ is defined as $W_1 = T_1$ and \begin{equation} \label{eq:definition-wait} W_j = T_j - T_{j - 1}, \quad j \geq 2. \end{equation} For the rest of this discussion, we make the following assumptions: \begin{enumerate} \item premiums are collected at a constant rate $c$, hence $c(t) = ct$; \item the sequence $\{T_j\}_{j \geq 1}$ forms an ordinary renewal process, with the consequence that random variables $W_1, W_2, \dots$ are independent and identically distributed; \item claim amounts $C_1, C_2, \dots$ are independent and identically distributed. 
\end{enumerate} \section{Adjustment coefficient} \label{sec:adjustment-coefficient} The quantity known as the adjustment coefficient $\rho$ hardly has any physical interpretation, but it is useful as an approximation to the probability of ruin since we have the inequality \begin{displaymath} \psi(u) \leq e^{-\rho u}, \quad u \geq 0. \end{displaymath} The adjustment coefficient is defined as the smallest strictly positive solution (if it exists) of the Lundberg equation \begin{equation} \label{eq:definition-adjcoef} h(t) = \E{e^{t C - t c W}} = 1, \end{equation} where the premium rate $c$ satisfies the positive safety loading constraint $\E{C - cW} < 0$. If $C$ and $W$ are independent, as in the most common models, then the equation can be rewritten as \begin{equation} \label{eq:definition-adjcoef-ind} h(t) = M_C(t) M_W(-tc) = 1. \end{equation} Function \code{adjCoef} of \pkg{actuar} computes the adjustment coefficient $\rho$ from the following arguments: either the two moment generating functions $M_C(t)$ and $M_W(t)$ (thereby assuming independence) or else function $h(t)$; the premium rate $c$; the upper bound of the support of $M_C(t)$ or any other upper bound for $\rho$. For example, if $W$ and $C$ are independent and each follow an exponential distribution, $W$ with parameter $2$ and $C$ with parameter $1$, and the premium rate is $c = 2.4$ (for a safety loading of 20\% using the expected value premium principle), then the adjustment coefficient is <>= adjCoef(mgf.claim = mgfexp(x), mgf.wait = mgfexp(x, 2), premium.rate = 2.4, upper = 1) @ The function also supports models with proportional or excess-of-loss reinsurance \citep{Centeno_02}. Under the first type of treaty, an insurer pays a proportion $\alpha$ of every loss and the rest is paid by the reinsurer. Then, for fixed $\alpha$ the adjustment coefficient is the solution of \begin{equation} \label{eq:definition-adjcoef-prop} h(t) = \E{e^{t \alpha C - t c(\alpha) W}} = 1. 
\end{equation} Under an excess-of-loss treaty, the primary insurer pays each claim up to a limit $L$. Again, for fixed $L$, the adjustment coefficient is the solution of \begin{equation} \label{eq:definition-adjcoef-xl} h(t) = \E{e^{t \min(C, L) - t c(L) W}} = 1. \end{equation} For models with reinsurance, \code{adjCoef} returns an object of class \code{"adjCoef"} inheriting from the \code{"function"} class. One can then use the object to compute the adjustment coefficient for any retention rate $\alpha$ or retention limit $L$. The package also defines a method of \code{plot} for these objects. For example, using the same assumptions as above with proportional reinsurance and a 30\% safety loading for the reinsurer, the adjustment coefficient as a function of $\alpha \in [0, 1]$ is (see \autoref{fig:adjcoef} for the graph): <>= mgfx <- function(x, y) mgfexp(x * y) p <- function(x) 2.6 * x - 0.2 rho <- adjCoef(mgfx, mgfexp(x, 2), premium = p, upper = 1, reins = "prop", from = 0, to = 1) rho(c(0.75, 0.8, 0.9, 1)) plot(rho) @ \begin{figure}[t] \centering <>= plot(rho) @ \caption{Adjustment coefficient as a function of the retention rate} \label{fig:adjcoef} \end{figure} \section{Probability of ruin} \label{sec:ruin} In this subsection, we always assume that interarrival times and claim amounts are independent. The main difficulty with the calculation of the infinite time probability of ruin lies in the lack of explicit formulas except for the most simple models. If interarrival times are Exponential$(\lambda)$ distributed (Poisson claim number process) and claim amounts are Exponential$(\beta)$ distributed, then \begin{equation} \label{eq:ruin-cramer-lundberg} \psi(u) = \frac{\lambda}{c \beta}\, e^{-(\beta - \lambda/c) u}. \end{equation} If the frequency assumption of this model is defensible, the severity assumption can hardly be used beyond illustration purposes. Fortunately, phase-type distributions have come to the rescue since the early 1990s. 
\cite{AsmussenRolski_91} first show that in the classical Cramér--Lundberg model where interarrival times are Exponential$(\lambda)$ distributed, if claim amounts are Phase-type$(\mat{\pi}, \mat{T})$ distributed, then $\psi(u) = 1 - F(u)$, where $F$ is Phase-type$(\mat{\pi}_+, \mat{Q})$ with \begin{equation} \label{eq:prob-ruin:cramer-lundberg} \begin{split} \mat{\pi}_+ &= - \frac{\lambda}{c}\, \mat{\pi} \mat{T}^{-1} \\ \mat{Q} &= \mat{T} + \mat{t} \mat{\pi}_+, \end{split} \end{equation} and $\mat{t} = -\mat{T} \mat{e}$, $\mat{e}$ is a column vector with all components equal to 1; see the \code{"lossdist"} vignette for details. In the more general Sparre~Andersen model where interarrival times can have any Phase-type$(\mat{\nu}, \mat{S})$ distribution, \cite{AsmussenRolski_91} also show that using the same claim severity assumption as above, one still has $\psi(u) = 1 - F(u)$ where $F$ is Phase-type$(\mat{\pi}_+, \mat{Q})$, but with parameters \begin{equation} \label{eq:prob-ruin:sparre:pi+} \mat{\pi}_+ = \frac{\mat{e}^\prime (\mat{Q} - \mat{T})}{% c \mat{e}^\prime \mat{t}} \end{equation} and $\mat{Q}$ solution of \begin{equation} \label{eq:prob-ruin:sparre:Q} \begin{split} \mat{Q} &= \Psi(\mat{Q}) \\ &= \mat{T} - \mat{t} \mat{\pi} \left[ (\mat{I}_n \otimes \mat{\nu}) (\mat{Q} \oplus \mat{S})^{-1} (\mat{I}_n \otimes \mat{s}) \right]. \end{split} \end{equation} In the above, $\mat{s} = -\mat{S} \mat{e}$, $\mat{I}_n$ is the $n \times n$ identity matrix, $\otimes$ denotes the usual Kronecker product between two matrices and $\oplus$ is the Kronecker sum defined as \begin{equation} \label{eq:kronecker-sum} \mat{A}_{m \times m} \oplus \mat{B}_{n \times n} = \mat{A} \otimes \mat{I}_n + \mat{B} \otimes \mat{I}_m. \end{equation} Function \code{ruin} of \pkg{actuar} returns a function object of class \code{"ruin"} to compute the probability of ruin for any initial surplus $u$. 
In all cases except the exponential/exponential model where \eqref{eq:ruin-cramer-lundberg} is used, the output object calls function \code{pphtype} to compute the ruin probabilities. Some thought went into the interface of \code{ruin}. Obviously, all models can be specified using phase-type distributions, but the authors wanted users to have easy access to the most common models involving exponential and Erlang distributions. Hence, one first states the claim amount and interarrival times models with any combination of \code{"exponential"}, \code{"Erlang"} and \code{"phase-type"}. Then, one passes the parameters of each model using lists with components named after the corresponding parameters of \code{dexp}, \code{dgamma} and \code{dphtype}. If a component \code{"weights"} is found in a list, the model is a mixture of exponential or Erlang (mixtures of phase-type are not supported). Every component of the parameter lists is recycled as needed. The following examples should make the matter clearer. (All examples use $c = 1$, the default value in \code{ruin}.) 
First, for the exponential/exponential model, one has <>= psi <- ruin(claims = "e", par.claims = list(rate = 5), wait = "e", par.wait = list(rate = 3)) psi psi(0:10) @ Second, for a mixture of two exponentials claim amount model and exponential interarrival times, the simplest call to \code{ruin} is <>= op <- options(width=50) @ <>= ruin(claims = "e", par.claims = list(rate = c(3, 7), weights = 0.5), wait = "e", par.wait = list(rate = 3)) @ Finally, one will obtain a function to compute ruin probabilities in a model with phase-type claim amounts and mixture of exponentials interarrival times with <>= prob <- c(0.5614, 0.4386) rates <- matrix(c(-8.64, 0.101, 1.997, -1.095), 2, 2) ruin(claims = "p", par.claims = list(prob = prob, rates = rates), wait = "e", par.wait = list(rate = c(5, 1), weights = c(0.4, 0.6))) @ To ease plotting of the probability of ruin function, the package provides a method of \code{plot} for objects returned by \code{ruin} that is a simple wrapper for \code{curve} (see \autoref{fig:prob-ruin}): <>= psi <- ruin(claims = "p", par.claims = list(prob = prob, rates = rates), wait = "e", par.wait = list(rate = c(5, 1), weights = c(0.4, 0.6))) plot(psi, from = 0, to = 50) @ <>= options(op) @ \begin{figure}[t] \centering <>= plot(psi, from = 0, to = 50) @ \caption{Graphic of the probability of ruin as a function of the initial surplus $u$} \label{fig:prob-ruin} \end{figure} \section{Approximation to the probability of ruin} \label{sec:beekman} When the model for the aggregate claim process \eqref{eq:definition-S(t)} does not fit nicely into the framework of the previous section, one can compute ruin probabilities using the so-called Beekman's convolution formula \citep{Beekman_68,BeekmanFormula_EAS}. Let the surplus process and the aggregate claim amount process be defined as in \eqref{eq:definition-surplus} and \eqref{eq:definition-S(t)}, respectively, and let $\{N(t)\}$ be a Poisson process with mean $\lambda$. 
As before, claim amounts $C_1, C_2, \dots$ are independent and identically distributed with cdf $P(\cdot)$ and mean $\mu = \E{C_1}$. Then the infinite time probability of ruin is given by \begin{equation} \label{eq:beekman:prob-ruin} \psi(u) = 1 - F(u), \end{equation} where $F(\cdot)$ is Compound~Geometric$(p, H)$ with \begin{equation} \label{eq:beekman:p} p = 1 - \frac{\lambda \mu}{c} \end{equation} and \begin{equation} \label{eq:beekman:H} H(x) = \int_0^x \frac{1 - P(y)}{\mu}\, dy. \end{equation} In other words, we have (compare with \eqref{eq:cdf-S}): \begin{equation} \label{eq:beekman:prob-ruin-long} \psi(u) = 1 - \sum_{n = 0}^\infty H^{*n}(u) p (1 - p)^n. \end{equation} In most practical situations, numerical evaluation of \eqref{eq:beekman:prob-ruin-long} is done using Panjer's recursive formula. This usually requires discretization of $H(\cdot)$. In such circumstances, Beekman's formula yields approximate ruin probabilities. For example, let claim amounts have a Pareto$(5, 4)$ distribution, that is \begin{displaymath} P(x) = 1 - \left( \frac{4}{4 + x} \right)^5 \end{displaymath} and $\mu = 1$. Then \begin{align*} H(x) &= \int_0^x \left( \frac{4}{4 + y} \right)^5 dy \\ &= 1 - \left( \frac{4}{4 + x} \right)^4, \end{align*} or else $H$ is Pareto$(4, 4)$. Furthermore, we determine the premium rate $c$ with the expected value premium principle and a safety loading of 20\%, that is $c = 1.2 \lambda \mu$. Thus, $p = 0.2/1.2 = 1/6$. 
One can get functions to compute lower bounds and upper bounds for $F(u)$ with functions \code{discretize} and \code{aggregateDist} as follows: <>= f.L <- discretize(ppareto(x, 4, 4), from = 0, to = 200, step = 1, method = "lower") f.U <- discretize(ppareto(x, 4, 4), from = 0, to = 200, step = 1, method = "upper") F.L <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = f.L, prob = 1/6) F.U <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = f.U, prob = 1/6) @ Corresponding functions for the probability of ruin $\psi(u)$ lower and upper bounds are (see \autoref{fig:beekman:prob-ruin} for the graphic): <>= psi.L <- function(u) 1 - F.U(u) psi.U <- function(u) 1 - F.L(u) u <- seq(0, 50, by = 5) cbind(lower = psi.L(u), upper = psi.U(u)) curve(psi.L, from = 0, to = 100, col = "blue") curve(psi.U, add = TRUE, col = "green") @ \begin{figure}[t] \centering <>= curve(psi.L, from = 0, to = 100, col = "blue") curve(psi.U, add = TRUE, col = "green") @ \caption{Lower and upper bounds for the probability of ruin as determined using Beekman's convolution formula.} \label{fig:beekman:prob-ruin} \end{figure} One can make the bounds as close as one wishes by reducing the discretization step. 
\bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/inst/doc/modeling.pdf0000644000176200001440000023266515151412437015565 0ustar liggesusers%PDF-1.5 % 8 0 obj <> stream xڝX6 QD- SH p ,oR7HQk7 1L_ tp&<}]Z?, \O.S:۪@o톲 ฬ,ZU_U~j&Ì69EUX5TI+b txpOD?6+njŁvŲqK_o q(#kGXyϲ=é㋘;:Gbel ,34w!uPu9I.5[ϙ9^~ޝoDhѕ(Q2y}X6 7YJ.jof>mr-W<uwé&VU2QLG/H~0>̄MZ0n2)'4;RZ' ]bqh4BCsZVkJqNhy&`jDүcN4BzõeA7%uo"\ɼʉ3>$jψ:xAE1MZ;:E{5D\iiUH O=zJgJnmS| o^|y3+4Hȝ e ^ DSBfYtOMN@kLܶTC91T/'`Ȗ ne4[,^Ncnx矡RAI5:U: }j=.gt+W8S^4U~|I܌y7q<6ڜx}y-Y.Y g&"dw_@<$' eg@gà:S` :Ö:B_/ w p_w[Ko⺨کN˓N{z1G)ԧ@nCް:zQ@DlfȒ;僒H{cLM|'˚2%cE]4[+# endstream endobj 16 0 obj <> stream xɮ+qP-Y^DJ"e,UM (m)NpN>o_?}8V}ZEOTtEkHtj{)C?iS R1Nvi=>ݻ:V~|t0ݴH!x|(h(HsBڣΨטK5o. X_0hnԂi,$y ׳I13<`?ʧ_nkd!{]Ax ၜQway&/.4>+eiטnĴg!'#篪fNYL*n iLr9ue |i߄x4JIlr  fG݅evsEͬA0uW+Ȱ4[,tR}9u2rfi_Md.~H ޶'w)h/xub[Ȱ%]q7SG|xtB2MKZBhe0wI8J6@!YQ\Q(<^oT(SG& ǝ28DEm#EZL3 i:v%x e%.X8vx:s[Ŧc/zq~|6yw˒ %8<>Xϱaky'.02nDWB4 $wu^YeE4aEe#7KÐM6|jT/!stX>z 0;zԚUQM?\7> LACG#oama8=v(N_u n;љΜIRӿ'! fg%Y]ɩ 9? E.Q*۠>/d'K3x5Lkr 5;M=|m.ݎ#REw:An0_p#ًys+ɭ̔XWU{t5԰$Vm|с,!7.΍DV0Ou~=4ȩ)VoPlXnXlUŁ}cZ&9!fG?zk xMnGKb^fEGUf1[* Z#. -@;tHOWxU7inGfCQ͐6Sqsb3Fm.-"G+ǻXǸy6.t5yL&'۾7 w|[Ь}][(-^GRI7f鋐aseZz2"-ݓխV)(c8bU֡qzZ1k+T }6tq[@P侱Pw l3M>*7lkmY)Sq۠v3FJldцިQqi#+sTl$bY^QpmB3WRеadg[EHXdO A!Ȱ@xFnx[CMg*p 5"q 6f(LJ]nIe0՘#,&[|\;zM\WΗsQ`ߡM7X /:["~:TJX)@QT{{9Uf>crOnr$2fQ,8 mg7dOS"&]~#8MvA\8/ ;Oe1T1q  ?yԈZz{^-v[啾].~_fWYB n՞ Fo$n]0]u!l۬QMYh4؅;P/}դm9W2P]Úc%E5{ַPn0նࠣDj8y ~$,qaOd4Ǟҩ v?iU}ȁt3Umr}%_<1ChYͭ5 ~il:σ\gdU , ف_bL SO}P£cVkSD@g/yYty=Ė)ɋ2[4bmRE~웷uNnܪ:v}xԦ䫟fRF2sz=c^0HS4w^NP-8~zsT| ZK\Η` endstream endobj 21 0 obj <> stream xڭYK6 W*J`%[x4ҿ_Rdva%%~|d>`蟞.'z>L@Fem^dm%=1}~y~ hJ:r@C)L/?Zi>遼qx4 <ҷiDZbif4̌tQ#H#1Ш9@'_/O! 
A9VBv44c95E; <K9K_<9=>OLI!Fh",E@L\]^)zoW&*#eL]C« aO'}ӻtՉnsHfPJ]XhZ sq S}*;!e`c;Å@E "ד'f J Dq^ay\Egm;߸Ԁ/լ$T.kh.%N-)I=ԂOp;..TvC62,-,IYgƲ%tڵ!(z;﷎@KM )(>7 a/zDnN> OGrca;v |s\/־5PjSdp@&D00k:I{yh >U7xG%YA>AF4 ŮXh EֺƛjQ@"l`-%4j -yu}ɛ -ji˧cOIC)DLfU_W"\A IdAdn8O`X.цMٓ07YthK^{}]HAIYv&؀INS}&3#tT,mKM5R_Ij$ra) Z̲nOUّ֩ma&݅tg%eNЭ߆ 6fqCU%BZ>.im9oUXfIN ;Ɣ VVYG;Lk nV9md Oj%:c߱%U6fR}f9* tZH;KϪ=ڗ#QYNTAy>\Tε0"(byܖfy4$[U3nfaj{9IFq[Ҹ:868iL}WlW&ŏMź(?*^b0MorkQl{C$Z9nz^MZ? : endstream endobj 25 0 obj <> stream xXɎ7 +ZDR PC@nN=3K|CR-=].TD>R\&5I&O|YYVxiw"t>3&c ?>/oè d10iT‚_/OϿSNMOɰqɢG^/翟~TҨ~Ïf?%?fA~l~.+03"#k{ʆO ,*abg`dz#S~K6L CvRF Ov+@Ԉ / d$,l (HH.荔\bӈm>@C_nnû70w-.+ HzK _k-Jn?4`iR}z )ϯpZ*/s\PI-Rqpp.y-?/a:(X2>Ks+Q n,/3yrbJgO.QjiѢ8Nb;A6b株0sIL? Gw uuH\D 9I-CA8F[k̜\Nq4aҝf_U`]WI~FsV"iqu9 fA RkGREJJj:ZYHjY{E?JhɩIuag1gPezB_`G64I/ 5Q©"S!> stream xڵX˲'+U*-R*;'ڥk&<%˩{4МӍWL0+_|@p/<Ȍ;vZaUyZn 4NmR[|THo:#QŜ0ANC0c*;>2M&&. *Xl@9e@[)yO1x{i:Df_J/} csϏϫ_" R;/8( .yŀ;̀;qK(R\PwGnj @>Kqq7W6V1iY-V*m^N uث0; S1[TeDDPmЌ`u X(b݄mh6ZKH9J-t)*Ȣl +^5Qmp,oP 'h+Ǝ:I:/B+ZptCfzfDClCC'RBikVS8K5Ԟq .wW"`1[f0,ί-ap(sM"+=Kx[/eK)&v((8&tCTjjfR [NkMQd?&ฐ+- Smmzpo+]FRbx[0mpmCzKGjGGV~a1HwSʊ5% ̎qIKaWzͥS5Gk.zЭ76-F;[ovl]ܴ[tԬK #=d'4Y ($ű2[ R,b$%QV+6&Iwzel#Ao_ryUϒjYhvkƥ{Zs\+kj4QA&`A畝CSrbۭNT3,o6nHNՆln5VD&k y'7!?FgDAw>5t~![4)wZ3rܨ3};crN͔~xGt^1J/ ya, C[*> stream xXMo7W mV`KV.!k8HBZjfv8 G38|;\.>a+cd+>HؑR#>.#?O@!??l>;,Cd'q^Q $OIR-⨔5w;TI)OEΐd(M2>0yF2$MFq1cz,Κ7@go~ї <rnUv34)Nĩmg+gnyV k o7 ~ԯ>%uk%Ϳ?WC@ɨbn0A*)/9}G0du…;OcyT Vg'\iIݧqܹdFR&0T\PjȘAXh8%$ZH7wʳiJj^, K!7ePucVQFVU8-*OKɍ-Rmw`V;媹='/TL!yls N &vPX#.9H9AҴwNOx+M4 >4Ґn#p/75dvF"YkOG6XwF*q\ןzQ2%UH84xLNMᡰ:;RRfUS u+9 K-G&^8> KO }$vvb v.VpQfPZ*M- `_oCm"MoG㵌{8Vu-!*HջVyd"Q)Ү/p:dA}mUNH)]i=lʺG,{ f?=4l]- o>b"~gXTLMvng:&[[FX7oovAQjL'l顷M!aM 51Q#2w3lN)1qs ,*QU ZnEa(j_|{c .e+\ݢu\ݚ)60Mvw`U2K.?v}bf ⢩ rXjd*nX+iw5Z|8O}Wb endstream endobj 44 0 obj <> stream xZˎ++D70E؀wN.BI M~ߧfhؾn.֋UuM<~/n۬>@R [q$m*zwZQN87p~zٮVNeGETPۗ !%F~I;sqi)G⒧|$ C}񂩧<_SU[>5SZX 15ձ\<^GʡxjP1 XO±Dթ,)W~T9w)^QF*T㲸.-R%%p],lrϴ/e֥[gfZfwmeN 
2(662\aG]ǼH+ႇinɓZor=5a:Gc? M~ne MUE}{eLSsٺ}NSdm*i=w[1y Y}%Q ;B-clWfSrhTe6w|%EeW?-߲] 3oa$@eKubKTKY$ӃQC#VJEuS˺F':8NV߱$w&kL//W!%4/4)fT-JZ|&QVh1n,D+:QEǧJL(PDZ5_>aM3C5ZDǹfzZ8K gk(+5F!0CuӰ#ٻKrc\(^ ?LesUF-1S0(OAJR 1hєK(L֥q]$AWOO;-HB[W)׈Gp?(F\;M0q,@hb)7-2o_#>Hf8/C2Co g,V0 K=_mu#ȓޡ3@پÏ~ %Y|P裪9!5xY' yVCNhؠZ,s?* V슰0niaJ#EtFKqdq= CSc.:#BP`u+f|F1Y8EP&\שҹA`_b[JXl!]5R2ƲߒJgNrE 0ɧM3X,Q+-\sí%>D.+hIS% f.*qŞFg-Fut0Fvٱ nbߕ^)ÔyJv~=2:-V{Fcw}ꮬa@=\o2Hin"H~Nb谗8ZRNT)bNX)^RJ)4[ONqlj_aS;@S7!z j>wͭYJ%mQtHzBp %G~:A9t"r߳4`K#Oyf=0b.yaYj Ś7e,T)sK2W;(k Zln|n XL ᥩ[dPVZe*FS (Ob(w PK.t]R'Z*B^iw]SK! P}R1~`TŊFb]u-4ɺpI1Vh0+㟢M>p{@4f}h;-']wXj[?K°H(- cݕGNS#Bx?h4(O~'E 3s2飏<Vowp,jHرr,]!c |@qlGjb=^suhKZE#$O܋O?d#C-ޗB,?WN+4i ;x3v?C.w9?K<ڌx3|XվԫT9gs9Ǹ#n}13S7@_IR endstream endobj 51 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 52 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 517>> stream xڭTKk1 W@X_sMڴZHw&MJf=k2yȟI֋6\s֛3XƢޙ Qa+)"Hypax~t{X_`}?"0c7]?rx/`H@ #Oa|8.( ۉnxx])dL'=~7`!gchh"a$uhc,\`?FV" ] Yl80CaaB|F5bCBgEvA{h5dX'MWhy5){kYb-ѷEQF^!ɡWDʹszJ 3gK@YBqY^1NG!@idg{rPL tGjVi6K)M,q> stream xڥWn7+Mrs[%B _y\^Fؐ ͥX6M2#Fb Mi-syؖb4&"Lf!DMbl减F-qMN&z*y|,N=|0Dg,N[9M, zMxD7@ ;O.1@N.Ӽ46t"KSomջ_e4?JJO3IH ,M[)*ʨRTXke0kf5Ttt s[~&$֎܎k*T& q#d,aLpoJlfm^i|=җǠa-7ij&1E|`nk2,.qרF|wEtB(1^x0;uݳ7R]KuԦXn5t?OnWvD@5ihxsK#Q٦AiJjxVQi{6J+vŦefsĂs?b6O\0=5L3N#X&:K,Xf@)/ynB(4nz76|m8*PZ]E9iÆ2S)[:Ug7H bG zDQ CXt<e+Dmf7ⵈ2s)3y{'xǐiFߘ\ h4 cRѩa솆}Mw εu}6s4{#:^)K H,S0ܽv mأMyF M?Gd endstream endobj 62 0 obj <> stream xwTSϽ7PkhRH H.*1 
J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 63 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 946>> stream xڭWˎ5W2YP` E)̕XVM`>zEX̽qo-|']1%)` z#|^Kxu9ty{~yxCg|U5BK#}%lT$1bǜ1&WA$g 4̏Wcc|c^zR~&.T!C"o}%; #&08CC|[xKy} fپ  ZN;*fc{pg?tdVEUP-F͍Vj#}e AWa7w֐̭l\AS,s R嶸2ב, :YKOwm#~j#enAʶTȷ*0gk_Gtb;ʏ|I>Z8U̢5H䚙Z1+-nLֵ:¾PBq=qJ; g\ @2m~h,i-H#U#ߺH}%Ξ#TPBqWƈwI;ʶM-] [-/>k;fڇw7O u7 17z~.P7A0bXWjza!۝5u0|4.T¾=Efԓ]wgJY:dCϨϘW锺vn:j}{7|^Oh K/v3np%,j.#2KG + ,kXwhٖݢC ?_TEJp^o$*O^e0_˩a&'e`KN7Yڀv1w0UzR=ҩWLYwO> stream xڥW7Yd8=`{%^~ߏdf&|uq՛"e#-3ק7C`k+:Du":z*bf)ʩcr9WR෕Ǥl2C>CBH:.Ƙd"Ƣn8YgS2P8}]} N0׹`&'?R&;/Cޚ4d!)GaIJ-,8u+"jMz*=s^}SfNlv2<3ATbSR|߿>Sh|AtV]=G*^e(UAv78"g$v**wIX='= Nލ|پʖh,m:MX>(s/,Q;H5dB3E(QI,(+GIu+Dvz"I۞@ kW9@G jBzP}U)kD[BY 4d͞;S\`{Q ĕb%¶[86dաrs^UqEt:6)cT=$ՔJ2zڥɣڅdVM*'OEkMHW4#IMvϳXÓC+dQp9gكAAz˪r ;IR*qGֻ̛Eݶ#Lt8JrKȃLGt x}ZfvnMV~0pԤw-> stream xڥX;#7 +"@@lb%QڗŮHHx 8VoBb V+2c;7Zf@K|Y!&6sGണ̬vк 8JfF~{*x Hx+vA^+Q3 T4qjz3 u3tQl!5_ixL%)+ &魒2L6E6w+*5 hn,0N)Y%p}s?t'%SCJ#h@ڀf_Yz,<$&ɘN|'L+ܾ`uHtv?5-jVVŁ*Pumf-jf?-b-v$o=PdܚC3Ykf2:~Ne)cSh-C ח@IPxq*nДCz* x<@8:FMi:ekhrIGNZARʓcnf(28,T㠱m-;0eKt&3fcg+"79<d.JJVnZm̆z/'%~H.|scAd`YRm3vl lŅ tUL$8I ˪bAխ_r*>Ž&f*p[X x9ZYHt&넸٤=aS٫.9*/2jRc^4=>eqs7~]Y|h@moyi:NMYn5n!6-m1]^0ݍU.>I\W53uYMaZT5\lr8fU7kݗخzi]pkˋ;on>d~p`MægLk/}=*aIn RC endstream endobj 76 0 obj <> stream xZ;, +"z-$9]bgnnMIsfv6=3((I?8Er~߾-.joŠp/'sY:@Jyf|>%ȕNϩМM׷Hs̴< Jy7u@]ퟤ7, E'-'eՖ`iTlHeDZYʙOl4dŹ;i C΀},[gZ{X3n͊1>&ǽߝ,h [QĀ; 0.zљE%ڕnf+D.T},g&OIMB[L=A>驫ʲ}WG i\qd Ͻ; !nϤ63Ș(^Y[G>I}K[g8r KbZP\6^tXک\J~B·&(BRQ>2 'Qy2 
DwK9%Hw¹/Zx"/)-ha!4oٺ}˜ѝ@fd tkeLf!6 &T6@boݶru_n꾸gv/Nvݗ}),7 f5R3][:l֠!ʸJ+p[4I/qQ_r/xj{ϓPSGo՝/ĽQnf?̌c'{3{d`m3TҾG4Qx2ʓn>I.Tz%a)IxXYߚUCJpY, E#teH#N ԫb.Đn>:K %0Vd`:]-zg%;g:7\8b+s+ ,g]a΄b(Vm{-'5AvXVg8RՂKhH,eO#]Ո1?!q A:KVӖiMpWm͢IPjR%V_S,z3Ynh NDpxV~؃(C$r}twSF*ElQM )|qI9BwU.Imx nm(e9˚oo RpSpK͚;)(Ʉ~HY%RoB= Iiia>0VRL s'7`OP1w/RN8ZœY,㼋m_ endstream endobj 83 0 obj <> stream xwTSϽ7PkhRH H.*1 J"6DTpDQ2(C"QDqpId߼y͛~kg}ֺLX Xňg` lpBF|،l *?Y"1P\8=W%Oɘ4M0J"Y2Vs,[|e92<se'9`2&ctI@o|N6(.sSdl-c(2-yH_/XZ.$&\SM07#1ؙYrfYym";8980m-m(]v^DW~ emi]P`/u}q|^R,g+\Kk)/C_|Rax8t1C^7nfzDp 柇u$/ED˦L L[B@ٹЖX!@~(* {d+} G͋љς}WL$cGD2QZ4 E@@A(q`1D `'u46ptc48.`R0) @Rt CXCP%CBH@Rf[(t CQhz#0 Zl`O828.p|O×X ?:0FBx$ !i@ڐH[EE1PL ⢖V6QP>U(j MFkt,:.FW8c1L&ӎ9ƌaX: rbl1 {{{;}#tp8_\8"Ey.,X%%Gщ1-9ҀKl.oo/O$&'=JvMޞxǥ{=Vs\x ‰N柜>ucKz=s/ol|ϝ?y ^d]ps~:;/;]7|WpQoH!ɻVsnYs}ҽ~4] =>=:`;cܱ'?e~!ańD#G&}'/?^xI֓?+\wx20;5\ӯ_etWf^Qs-mw3+?~O~ endstream endobj 84 0 obj <>/ExtGState<<>>/ColorSpace<>>>/Filter/FlateDecode/Length 2169>> stream xڵYK^5}lRČ4,j#?N|}X|Ulˮ螻^/q?P\Ͼz=}^_ߏ_\!^/Ͽt}}9{uui݈~t_%lnk>Fwͧ4E.pb ~ 6?}q|-.nƩ x [Op~.mW2BuwŹ|>B8\] kw:E}=a{8P&͞&=29k|w9#o-m*6oU'G~=\|KAL{[gFD&1gO#.Y ǀj'2[w, 4`Fn<b5w ^XIzOep %l,z/oэRӴ@^FDVi)`oxPaaSX5<[2|nn >Y%J1d@kDg w]xy8 ŧ h4;*)=a0kR(Cr"g1FÔđ)WmLRnTwfUbpOq\b㍓:#4|@8]#*Oq d:ޓLیd[pOq\`#pLP:D1y\El[u$Q`n)Ӷ"fSq)w[R*Ǿpَ6QwRN~Ctv0b-qt&G9ضh1&.f[kok\=-sɱqӻo_G %@>'p)yS2"yҔc5ĶE[G"%qa[kok\=Ms{/>G߶;\&h6&W(\(rSP(\2y#yU L. 
nw$ܸWglT).`_ٸg:e/BQ߬ /%ܡl"W'BnSAyKEQ<\(\27P*HSaźJҼXASEdnY[t=.yGxO(QOQ+?L/=6~FJY0ua#UyoG?\g=(7S̸ '/_ޠ'?W}u͗;M^܃nOZ_XRIJBqDJ'>*%͂ke8I,SW7%Wyi&sY,/0!`hBc:p% 'i2f{­_݄ n)KZPlDi-yiE ^TtjhGէI<=Hy ؗ/S^ khpTMiY`خ0y^f5|TSTf_Fp]ʣS"-:1.ʇmANr׸m.U|Pu$I7:qfY;=X;jfm:' Ͷ ['mr˸k:ta}A@mM*h$F]'ʔs?ضh:R:HEZۂD[|\2Υv:IZ u+;%7)#^}35k5ĶE[G]& Ǵ͈ͷ-.ֹ% Hl; <(1pWd\Eaf$Nnl mrKe.O@@W~l endstream endobj 85 0 obj <> stream xYˎ6 +70Ȣ+L.DْGN-#-Mꐞ`S@7+]~=)+3*f@P&&Iyo5(~N?5}JW htTC ~ztiK 4~!&Оxmu@5Ret uZޙ3:˓wYiN6 E Mk] ;Q6EEV\* a{ _e8nLQ/SrBkR xܛF9r}g0 2{2XPȷWUt6CcRֱiiq+/bIl#NDl qnKnU[/1 c1j-ì uE˲F0#ʓFB˾dd⦤"%XQYҧȊyŝ~..^90ls'2Fjyu %߫L㭄[&,)Ts@kI2gdJ&H5b9;K6EED"VŃ%+hmZphCBGMDC!]x(yPh㊽Lf1u?.ҷ3J셉x˴LTFᖅ י|+ )3Oo BPJSf_ȴpr'F 7.$uM_ 3]m@Bq޿l=m)]Y/@96\w ,4S&0P6@պ20!e6{ROr}Z>qBKξ^I8PM*?0, ߓo6ηcMQ$>:ҍP%@H[՗@#\}ƙi'ƽO491t`I&(1n#,*|Hpvu>;$B'0)كzuZg%"RD(pbyx"*6𡐩 Gːܿ-( QڶY@w5脁G|?=h>ބMw|umKu O5}^-h.4nG{g{]mQ K]}qWO t? CGuY2 UmQrW rVXG@+P](jGF?9ts<6&sB1#3ȨlASvB7mSk}8ɉHXYpw@9FyĦA=nk½nWV[mͅ@=|LެӄBXT!d$kLyHQ,(R>OaCuSDET2]99_; ؈`xоsSQOL^!p!񉱛Wty4UL) JO.y!~v<}z43OF+UuJ k3+]IbcE vwX1^R=sk}iYK?ڇ f rRdya2k+]t>;9a Dh=+w  0+|0xٓ7uhtd0 rok@P:{,)q endstream endobj 91 0 obj <> stream xɎ*@ٞ[Cć?UdꙞ8p0O%ڋoN8E0]~ow d8ݧ0!hzEǙ+O7p׏w<+khlkWT/H7%xH=$2)iL(,VV߱ Ph9?߫Vfkq|#FxhfARw% ,8Ri,x:ι7Jy$TU]]n fKJ;ΐ4`Y_ , a>l䔔w'G2F^նt B-1ٌIT(n0«&37 nԾB mz*/(~ʣf>/eԻNPki3vS((y.>iO ,}Zorv-pjKD͑#v_:ak:Y\z?羰M߭mJkɫN,KKexepQת=w(1_'YY{2Xe!^Khӣ("+UA|W&ӧǚqgj®Uy>4$jLE1luؒ95Z M^dSJc>4]_Q0$Q)vԥbw#=!I@Gv!"H쒞Ўbr69І%όGo]X>4}.9~xFK/s ML?M(<>7{ e tc}~Mw8f&-ӥ6&`ıO_7xYۚ%*'8/8ɺ}{yW` Nm(p$8{%p|+-;q=j7^uQ 󨌙yŐ++jŭ Q2E06nvPSIbԕhEzXvD\v'N^ya*[Nx*5׺v2#E*,c+^`QܗO!f*}PT\9:}9 J kW:AIZR}I6`V'v Eb.sC1pdWW+ix1͞yZ0g 8UBn|vڮlږmv"pAB?ɉ;m-lNhʋMEµu,:4 '>tH8NA4#@'̺,DPƁa(x?GrGˑ魳WK&Gܞ nd09q۶ϟA'̌9r P wUpVE=w/IMmuj[o3q6 endstream endobj 97 0 obj <> stream xڵZ$5 bgZ}@sCz c'V] =Ե8vMr>/Ѫ-,ΪܲxT'JUϟ߿}87E|,|}^-IE|.HUZӛ˯{Qv^kW~Sv>*/ 5}r~# M)c=}-o[0#;}kNb=:$T$2p].,>5Uv"+ e{BizLCXAƸ^y}g`CfaH6 Aمd[!\HM<Qa*0ka| J/$1@}Q7R.eBgeM19=`]O.kVdmEQ"[:zRRHh`!SSSbj˔E܈ʲqIAݠ;{>BUiJbK}ᯭTM̮5W/Ej*BP>uxɂ NH;/_NnXL 
Hr#LjFz˜"J[N03;aS;!'QnorT%KZYn"PZL4VҔs@W NPPZ8q|4\\ 0&Ơ'݀$:Ra> Lƪt@@G,  u)'Zˤ ǕQ]êCxOiB^w,_:DW_N:[eȮ=4Ik^JIhSv蕩-97RBC՟^EBr\#%#(O7M29čn,[;duRTEz\Q_t275= ^q^ǯfܚavGKև1?Gև8t7df Xu&2 R1>3()?E!Kֲ{#];V^X_Ϙ ޿ | >a =T !ʓ.4nA,G93_eZ4։Ĭej\?ܴz϶j&dI,!4w)rۻ%H<=H*.f̫3JuS?YZrLn==jhXU1SެBjh khuH ymWuMCoS8Խs(Ke͒M?4Mܥ5FmVPZz5>5!0|V9Ni x_"{LެE HX |\ 7AoL_W Y% K8uQav!*~pٜK 7FهZY|uEytF]a'\Ì, / MpŸ`NTzni+<1Ǖm#<qo4n endstream endobj 102 0 obj <> stream xZˎ+Ͱ"A/s2ior&* wESE-/h !7~}Q,(a9oEP-X~YGX@#,EEH?_N^%|i|Bf4i`xq>A1 "#K](lԀ-$39aL2pgfFVQ;Ѳ67,+D*4&ݗik)q9+S :{]3ja'l>DF_(@T>`֌y[Fe^un>"e W_*@@ bp"bR"&UTɿ`uc[튋o$ yU[G#tyYNYDQKmb&zE]'BE/'`\:_oOĴҒ͒r,AY lVi',o#NIPNLueL:|dj5 88eZ}2HR?$yQUHեShv; 1 jso)WdZBF7<\;j)oNҩ7T)`Gs9=[#؜s,X׷\&,亇B,jf‡٣[~E+%ᓡÚ{{vq2qSÕCu؝Qk H~tˊHDW7LjYU cB SdQV 'ϽZl%])RKՐ,f&#A8!xZH'@Hb~/s0 +P ~FV˕7<1#%mY,t T=:7#_KL -+T'PX?' _!@F8"B ֥3TvߛٙRgvKͦ'Ue GꉜifŘY,p&ymacxCXyhy3Hʓ ݨ2b>+hYf.uߥ[Qm|(wl3VJ}E?6ANNun-,"j&gf8":OGeenQy? ;78w[ܠpS0ݒzaܠQ-ڎf/s9(Skh^3kV̢  LqPC( IlW_1Rޞ,$⟮BnCv).w0>7-ͩ$`Qdb)"KrcC~!?-6t)-=ufE׳Z&D "L`g =bkj3md&X [ػ`Fg#Mb6 Zi`U :+[#Vhze3+@Ub~^δ>9mƜkZ_)Uzgo5OǪfG00$2Mކ@!L] ]C UtY:ua.|W)3kX3%V5(-6[sF8fh:~ ,7@!ʈg@po xUie`>ڳd=|݊-gע0l )҆UdzgdNΎO$  &{*t0anv&P^(nxp/}Td:@ی9$!>5p Q39ۅAiXm5f0&].38G]Qcȧ C{{,)q˦ ErÁX迳˾+2li( qOX\>wNv{6NV!:8y9uǡpmg'c=]^AU{_}=[ 0fb'5_Tl42mi#aQL~4"jg̏Ua,r@ͣ:sv4}ӆc_K%%|["/T~jEz vV]rx=@|Pt_鴺9Y;mYa6Y:789ȑ7t8d8X U> stream xX͎6 )QE r(.۶s+za/IQ ZŌ`Kɏ*P@%F]??FS@*\PuV];c2sN'F7^I6mdkN63q?l*Gp "$"9fI6G)f/&@u0^ԾpǷwF X>7Hu[!&38|iEY;FGT|&߆΍:jK+<#"#' 58A4)ỤHqEDRkjOɡu\}z7/ʹhK01FB8?jadG؜ΕF(I\$`H!H(GhRM'qMr $P]K 9'RH2lpDo >d|ODA| l ǩ|d6$Xt)6j` ipI}YV&?կ3CEicU! 
X9uNy'71N1#>:z/ftBH nyYjiy5qchfNkG͜ }Z-^W3T!F6 mn3a&s}81ٔխɛX\nxsʼn07X 1# ds-m~12 vb{΋NPcq->ؤ B)8wbDU0w0o0;GژAt9Sr]쏲fapN"eG<ލ=Vy,>u@#iU1ӥ$*ֹcE"bI q ųi_k,ڈUHcS} ZG;~?qo%Q\RpjNZ:R8wk]vDrZZˏk>xF~xG4RP5/u|$DJdkAvR]띃|>yQiޜ$So+U`_E6c)̣.TKAE3[#ke빮|:A08Z?0`Zu|gߋ:P׸ZB^ d?jbk"pQ~6WׯWWJv|/x/lKS0!@&_$޹K4 ֥~  9 \| q.÷ey_^|LB\kmu*NZX!pz n椮7/ƭLppb4VɒXmm< ki={ڿ6=CNܓo endstream endobj 110 0 obj <> stream xY;7 +,Ro0E@$)&.&?Dfvo.y1#HQeE,^Οi~?.>Z%`y-ΨrQ jyhVp/Zn4v̯L@#~GI3L&gBc@:x ,NTЕ(q3i836"A9mE\3bt"b#&D1[xy r6Hh `g]٭*k3bQueaG2 f>|tKRb_EBMr=ғ+m;ܷ2X سtm!j k_C uf*%tߋTr@*|n3@/qkm8jTN}4eq@LNyoT!uwBX<:{EIk~k{+OGsH+iD3tI=h ?GCݕ1eu裺muk3OzGNjLnCP>p73՟cJi/&g"udmXıXǻ x.Գ6Gy!;}hLG*Hv@* VˢLVS6ګՆORL 3M"OrZ+י|mzLCr(&ͤ[tۗN{oT5Ra\νϺr^sGf,>oR93hp'Soⅼ5fq ۶n\kQtCjjvTeCfQD .ݽ!{KYzŇ_Mpp}{lkcnܐ,TrHA]-]8*})z~`LT[A29_V.2s\qHY7] J7v]&97=>KT x[mH*A #K'Nk~8wߕ~*ο;b 5$#D9xWygQށ`iŠp.E-2z+¹0Y!!\\=ƽe(sKC86|GC2u-m9 `2ޣ\scV+)T+kkL+AM|jkzR/Ѝπn2Ǝ@`!nR_ڿmy΅vd ~7p-bp~7{Z8$WZؐȒi \mKvp1d`ujl[qN4AL 6ԻmvbBosYWxP[Ӧﰥ u/ mdT l߅ĵp;+')'~yostӋmeK,N endstream endobj 227 0 obj <> stream x}_k0y$Z[Aֶ0-Y6Jjg ][_<'' G7I穪;̠cZUQְ#$겳r_<sc~{ `{zʗ"`lHZ`r:N4,V:}biլΌ tlJ2Ɏm=q'q:A , mQ.W̢e$zS~ڨŬWs> cCP kUgo2N^_ϑDH3;߹BLQ+|KΌֿƴJ^ӫ\ɂtA_$ FDH)O4%"\ y7!" 9o H$nh< c+_i)QSvl)VCMk\xZ endstream endobj 228 0 obj <> stream x}Qk0yE[mA!c6-Ƴj"ѲO\a~GΎUٯF:VV0ꃑ+*ٍƲyk`:Ξ,Ⱦt$Z}1d1X%d9YǶ:QfA`1f}p;ܭMR;vRIMT+ 1'R6XӯqB TqSԶA۫gCO$ZD+$>E `+ ǧwqe2pNK%+yhS[|Nj(!)F|qaƽ2#z!8 J!ޟc> stream x]M0,ERպveoi@MBCjvaxqM'QBV:5OI%x?}iMLY >O0젤Z,_ j <4:s"qi̓,d; Y\ZA; {9Ib| :8&pbo< $0Vflx8{D4HRGJrR#HJ* U"TзF_5[Kmk?|vʆy;*R,<]#mYtc(?i ^[cơ]&#$I+=eQ+ endstream endobj 230 0 obj <> stream x}M0@>nU?0 E*VM"d6Q êm籙;^޴H߆ښ̹ !/ش HTD[~TOe\ YR_>owS9>~7[9JJ`ք[Nw,{lٌѬ,w;vVa\pQ;i.͂dk endstream endobj 231 0 obj <> stream x]j@}L)AW BHTm:B\͆6o_1dA囝?I5~ӽ(CvV-JYv\f?=&_E?} fjߊxjhȽ<)`yZ\6="1}~0fh{5Vlh)Jic1O5T%@WrV 'fQ6Y_s¶4~$ D QF"qi-<<>*K]IwxcOǣX28НR&ХXo5ƆVq5OB񏎻pYqz . 
N|N+SWc>Ų endstream endobj 233 0 obj <> stream xڝywTT 9G83l%jEcNJa΀A HzH[,I1V&79fϽ~{Л7wok=}XԧH,YՁ=#&b!V>aaA}|ݧTRܠpw;x/;YDGĈXA"s'",DCEa"ؘDG̈Y1Abs1'"-CŖabd>&dh$ dh!JA( F(Ah/'0%N?6YbrR2ZRև3Os7s&NolYɀKIr iÃ1h~-=#);l!nZ5Oˉk-- 3i؁aU:}- V1Oq|xAH(mzMOUOKer[ cХ\Yd[0! 2j/ ܋߿Hc9M% m & 'd !~4wΏ zѽ'E}I{a5thZb--f=N~[דf%z?t.&sHAwVO[ޟxdocgF&Uo+Q{V%zݮ^ԮJ87 =eGS_Ƿc4)Z mzvkxn&-pzDId"!A.L'L axjb[p7\pBhySh/"Wkvfca)SXEU>FÇh&f6Dž?H??&C1e*X:}Ey4z(wשO2d0,2]džahtlܻqx3Kƞ"I mmAD`r;XA͐yJx[6G\ؘ9:N죋t?"[C-#m6#08> h9=m22z15>2xHee(2KФfBaOpf:>(%2DCaqL2YKj`B4C7pO`egRQѩ6"k}Nu0R_F1[b`DЅrL+WCc<_XY Ŋ&uJM]ưDrҔ%uuv0SϺ| x-1݇C9pDH&$7"c4#6Gx&u=eέG, &)X&=8eKhFV4J# ^,+~x(׈8?W#*ٓ-oV/)a{s4G5ܔ_juOF11ճIp`E$$FX+*Ѩtu##{|;+y0&}s,":X>u~ˈ`".5,ט !K`bgcf w }IjyѫeyL@=a#1ُa-JFm,xZD7 w`҆Ȫ;a -o oV/a@]Hg/9WxVAi5E/:KRc01'}1)k2N0)yƶ=TΔ.ehb7(̟+; {fr"l(:YRعgX/hi2$Q@%(ϣI*J(U枠Z%:W8tK=:\yi~Iz+GN RC,|(`00N3s{z Td L~hfD!DWyޅt檔{v2oWTLJ\K$wl`4T'deJԩ 1d jLIo(1ozlZ&I5_iеv},z ;](=Vc ؄HEĮ7DL6+)\.=wQ7wE\]YL,ۜn0gYwbĝ'X*X| *3ԟuC>xQFd> s ,h:7\@q(e4W}*O*^Hm'mTVGb)pSuw%$/[7Q1zC:~ ?#w cc 7'?_jo"ʫ-x{\'ʸ\/O67~)6i(!LU>1hKYwdh**PWQcLL5!w~+rx0?EϥjP8gt|Mpi-fwAs5d$^|Gj7Y a*e]"Vg40X—'5Fqa^jaDx4_vKŢ}{EsԓoFEyJEy nm:/UH:ğYEᷢ@E5xw#XCI#t sݱ(8[_XАŻ*uɵ^xv".GӘ3 Ib =#Wsv=w#[STD16l o^C.{fU|ΛH1NK?+/mb\d7Ұa$-Xq~K7tsiWmt [OA,̂ސE%nUFKS NaG ]9Kgb9CHj2tV?1nLe[вXL++QLmu~vԢOӢtF8';\VY VD&ozUvA Te1[,Djd(C`+oR7m]RqR%iC+}+#2 aj/ݿAmOI  !K)vw%h[ϝ0Y [O2?STtU'tiIOMDF0JR &edj>ހ(-D,{3 v h]j'F!hMj .WBgt5 HWZߪld=Z@͎Ka;nܬڰOe}n:x9o,w qCY2XeVnfN˛'uhuR (= U:O)ևOd+Zs1^YLI5Ek?S $ṔӟP.@Ɛ=6`ҷ~S0[?t|LN3yi0 V'͂ktj]z~t$HVF]0tRW'6)_a ]louj'vsKAHۨҩ?ur2' ) X".CDbh4l4fgf&OKlT{Efr&]AN(IǜyW3#qMWȃd,"ERz09URmVTE*57G'ǧ`;ȋ!I֦ jBGr5++1L0 ?zav52\А,  (FgcqrΔX1R IO r( ':V"IA0kI okDjQe픴N :2VwlJY;8L2q7Z^1E- wwTS3gHJ_+&zWLvv[xh;{6@_p. 
[3bxEi|*]eЏUsy"?139I3%f _jx <ަoރcay Y_BmGC_WNJNtN =^6顃AW/$ lPn)3.%輵 }fakL&dsԁVP=Iy>*:V"3~(š>0m9XOwk53آ#:,.b`vI% f~ܵ|M& ǜGnY1~ &$Ɨj;&j>~./d0o}屢喎k[kΏe_ǜo]T\j]3kzYr~|v* yะ2cp)]0 !0l_^#L_yf-"!-gK$5Z'duRBdf.7[Ő*v%0`P2^>jb~5#$Fyr[wa8 ]L&8i!>a⿋|D0IqW&˦?S{e vl[FEQ]6av& N1drMs`YCc8PcjuG8VЧ˸~v O_.lJW1=FQzrM+oi}sϮ>?񵌻m{Ƴ4{}B\^p0/@O{{,y@ 9_bn? >PH' cKa,}3q[2ncZ,5x0H$ezyLls+%} fMeoPky endstream endobj 235 0 obj <> stream xڛ܀`qy `9  0i endstream endobj 237 0 obj <> stream xUX \OQ3یMEܥֽJE .DlB؉%,wٷ@uVOZ}V_ϴ}7~ߛe~3wrssfF*0@"J^n>^h/un:R :"|]!̷DA"bY,Esm,O cILMHD=|3tf3umf h!JdNKT-;%K-@[:Jm)fTt񪀨`U%SVՑqxUt+Lruju.Noet&(^(*TVO&A14a:)S>ˏH?6Q?08Љ4:5c()d}Aҥ6AA@c(fU֢rq i&/f-&=^a=(0X(#;hgjMa'N6̧+(xVQٹvgcW)XOuI2 KW C\hu?EQQXT"|ɃO9vcsƯgn<? [j+LAtRdƒ頄!n>܌{FM~ &&k A\6d("+'-&sL\7rk 01&'zDƅ1\!o-hߙA Tݭa:Y)"vX=r:^:Wރv ץ2gKA=tY?|n/e!, \Ć'C|2t5ŏ;ktJ^JQ. [vF4kS= 0 &fC{EQzd-+NkjJ=,[Ɓy3W#SEEe$~ .yų/?04<` UY`?ȤmB,6Ԕ,ڏ -=.s݆ Tte`* }yrnm#AQ::[ۑP)"j͆P%[)<@(ʻ "hԆ XUxwS`4(Xn>%__>iU-cXbRRTu5EM!ml.L37b:30ASɏs=;?v~X_ ltAءz22z^H+ַo^mIebzZ Ąk YC{O kI"+~9!N Zح JGMs-ob9GR4g51 Cd^ ]Wxbn;FZulQ3-jD&oTD(:&MG+RdO(ܟÄ>)#klLE a,"#`XBS#(3 0,AWd-LT 37F5L72ӊKŻ# uTd4$؈;JJ/_Љ/ܞLLS_&002K8C=rTtC'y[åƃ}Bhii| _[_qn1'{]ޜMI[j{)e^_AԢY"όd~K^{Kx'\;:b#FŲ9mK>]z?pq,6Q0/i|g_'% 9BVoİF0RtkeLB͓!f7ٯ =WvFЖl,hF$<&MD )CU2  R0_W؄w!/[Vu@,-tzИo2V#Vl;}sKX]#K6)6:.GyeDGtߗC,őv?}5Uto{d*` `v$S_I5XXd| ,h#x'ٲW 9~L:K620>$JᮂGD/جJH=7Wm`f݇T(oݡ2}|~Vӷ2܋zǠxfw“mדI SSC&dp? 
y1# M;TsXK%7`xS/Q endstream endobj 239 0 obj <> stream xڛÀ020 62 endstream endobj 241 0 obj <> stream xڭXyXTWms#b+%ƸNvNMPA#*8:4j0  :[Kx74f|߽VΩ_m gfI$+wmj.Y7/6~ebTisFJQɉ*#Jšf[,%xf·%p `W_rb9Ҵr'H, Qd}3.:KBbÔ &j+_jԚQ]Tm| ]X5{?yV3XYn|zr rVVGYϷδ.~Tr.omxXq饗- N*g>}["ؼ,NWm^ ~ 0c5 ]eB^ \ngVrl۲}|s$U.%NwgU*e."ٿ?wρt\\ْq2q!\WUr`ћfyVkhZO`O~ҿVdo=O:ΜX`̂ (.BQ$ğl /~12Tp?xyq鲪M+Hu ]S2;"iEp:ZMǂ?'D{#4Lj NTu-7]2ŭ=A:Әγ&AL6„j>hqxS.ɉ1e7EOk3ieƒRqIƺR r&G'32Uge$m9H0K⟅9t,ގ±86S|.*!'ć/%4E_>CO*N)XƶR$6\GW)w !eUեeUNj(#ߎԈ }ky_)Z8S?n!jz n.UUtu'UuW㪹v*N4F;MdHhxf K KO cUrH%X@*Zg2E.I?0!y9q `, "u|Eu&s03ͦ[8AvLǘ/9` fkLzX/'git8/U4dy^^޵evSE5g瞮)`?Y;E[NuO~yV,6Td |# KE"n\xκ d~^oz eؚ7(r<RwP-QgHP0_|#>+ݨDs7F 2SPoG/ J x@@Jop5@d[E_5#rЩt4]Mϟx3yjca,DlƉuO5imJ;HRj= o=`|iJ0p˽O7A5c,Z+ax 'B%}`Xt}R)U~4z~ve|-t>MG=\d|VW\>LP{+&ܮzIcK~owaǀNw}I'(9_V' Tl}r&Au^w\qieDh6>BIߙ'RwI?UW3(!ew ]r |>*{)A<.y&n8f8fU^oLنm7PA/Cͩ^ѷM74ɕ@̂,#jfxXщYUE>/9ĂK(OݶF4bZHnzP*4vIqe' T"MoH/^f⎮%7wn34̻Zzߚk h!t V Ie>.9L7廅C'Faܯmhk+ȅ+|I@7bYkV? ;î6đ-f JwRf׷&d)Ҥ`gxdI퐩/M61L947K3d>Gf_0_-}c(&\]!kUPԂf S S0mT S{ܑR1 ژHD4^4~Vcxq#@"A r=9@^ņvɹxT&Le=m5ڇԖȬ8cSбP>9HC6Re7閊սHH#=jd[WnK~i=PNhྫྷQ_^Du~l 2G}|TaE0nE *xESK` d%Dعش qJZ55-%;C0GVX^Y^w:CqAQ3KGdg\ڗALNJMKD{@O:O#AFE ͕%GDǰrzD GU]nsc41fM 5h6܁E[)Oo\ŶЩ 73ghڃdqND'#Dו+Oɍu>MqsCHu-1QūLx[2l5KfjӜA aW姱ZXJQySuj_pJ-t *gsumF+ͺfTiof ޟПl.kT[P[*rM]'gω tMZVD&d#KV$= .i_p,ɮVZkxHx'G0>n7 ƍ(m`#]MO3,7<2\Dؠg%əc `t`^:fRuB?Cų޺a沈nŪo]934%kyMUĠt EX0:DŠIƼ,">hJIy\NUd'ȱi(4Y.NFDA=5OTN[c 26 mtgiԏ_G寵,I9,au&wn>^DҦ}ņ̞Vo5 (~Sq3'.9ɖv,s 2fu:Ϙ ol.)Ł@2:O|7|6?)f ʞ2P{;ǖM0+d->ºl1/ffSmlmv9'rM髴,5[4[Z,7–H endstream endobj 242 0 obj <> stream x!e endstream endobj 244 0 obj <> stream xڍYyXS׶OLR9ԫ^^u*Z81 IHAE@Ȥ@eGEDʼn*8:kjmW{׉;D{Ir~{/BX, CW/pʗoVNZYi/kc+䜬EH\kQwGSj>![>_:#q֣,֣B;xo_:H5y)2N jp_<9J?'r_feI8\{F,~>jenA>*A~~JDy\ T;M2eiSM}?Tʃy/"Ձr)PVf#}a|^E72reP@0'bxxxxxxxx8Bq^"n@ 2"{| )jlFh#hceccck3f̆q8mpǐ!-CZrvlۿη]ivtr 9"? 
<Lq[XV-|6@8TfJE/N<b@VhM+MG*#-Kdd< {JVdYvwG.,:p[C'{pM|Vy6%z%1#sK>Sp"_ym8QA@2z":wa& ٬ZfC8kqsqׯa/UYXuζet1ҙ Šg}xO0GUx .0v0i<Ix#P+% ܁p ^뎂%{k:Ӌ@*Y=Kx﹔ BxL§T8ᦎӬ1K,HAiv]~,{3_ʔnA7\ovuƎfD<q 2T vqŹpWqnv${c[_}, >C+Z_N%a*܁ "{p4"^l#. sܳ?g_`Ŀ?|r&"T]^nL$1A8_|7- Lj/oP­fB|c^舖6\M~ $Sd:W, G˘`%sS=^=dd7Xmhu+li{+XIyΆTӣj,~%\GyѪݑc~ ~N>;[(l v8ƒsy<B2G$i3 "Vpid2%ϜHX!w1i'd2<1?q ;*[Nf»^3L|8iۄ %#0Z¶ %nYafNE*F"A(V{}ud<6IӹlƭG_UD,D %7wcQ|<+YPݍs9d`ĤdCðx^G>[eߩgIϬKu` ’^))o?׳Kd]y9΋7z᪋ + n k"Uh 75l^YZǩKE@-WwӼ/؛| ?a=Z,ܰ#R m0T@}30˷%ڎ[^Ns] I,ֱ k !(0xZ0+!:@B1*d%H% ޯ$0 b{7v-U/`=jhkG!c5"ULd! <؅4<ϛVd4dNa x0##C,O5Za!H^{Oq<7i=w{^-&^0jzT|ˉj,:)7`O1{{KӼW.W,(x"YiIfϒY/cƁ8+W,~!)HBdkYRoGyh\l!#iuƈLM),D>o>nKtBݰ%_ݘ#-9XH@&`N ãѴ(}4tX&~VTΣBƃ'..9 U_\6'j>n'J%Ⱥ(ք0ltu|'A)QɱzYpֈCc~ *4oڕ Y7#|E W =]q-Zs+ ϻ70 /0omj6G"sJYSamexc]A=Xϑ<ʕP|+K@Cؔ(/Ll|0/ă~I)xYhrC5b!tܜ*in8))$i1q)l!Քk4AQuuŅt^ 66taMvޭ긔hC!`wl]Q`s'}Y^VB#Պ :lKq],˺e)JTZݒ)c~YHcj W}(K-AtlR^_.k7x`x6t?aA];$kKRCx0 ka,FdDr>bJv&%GŒǮ5zӵXW{^u[*b4Fd& &)+Ә^i`k4 "ռNN/-ʨW(+mO~J_+y0vkG5nhϋ?A6p H`?4KmfF:ޞs"%/ue2>I@r:5 =6ZJI` k}VƓ4bgN}luنC Yx%M^lV-9{ WUv1/I|}g$kJ\rB'BwƞRU:7$cjsqWbf#jߨ491ZIZQvx-! 
2kI~tCU&(t:TTjzF%9%[’<>*XB8 4ܪ ;~*< (\b(ޝ۝!9ߠMrWt 'huL\n=~ɟjOf"Q(5U[U*_Kr+/q;v!ֽИ礮kVw}[`ٚ盏aJPqߍ4< ^ud No=OcfaI݂;@v3$W<:n7_+321yHEk ?_et7&c #sq8iCZځ?o{Ȓ5 :;Ҹp+:.}'`Cᩓ΅S0zˉx&vO1La \dM>.ݔZB?V|>wu3r7}Zwwun^$2ΗJʚAB:v<>CRrr퉹(CoF6D Rh%Ѩ/W93qBካta/1ZXx " Nv:|bix.r׮< &*0lvߥˊ8GA,Uղ%}PǾ8}6"N"(OqyL6aAYٗ?0K`gsU;$ckV^^cSA1 0GƢ| ^+tۼxJa$nh NZBݷ-.6!z+{4>EIl1xY4Z/I'ǎhRT~~"Mj/ޫ 4`+0DǨo6XFˣn~:W@Q (2h-ҨEBSYTQHolQ-yּRBfqOD+}],=uV}vg.8 ȟypwNeVlTT?A?M0oo:2ZSЎ^r[%t|;{Jμ06"cާ(ڝ]NR&k9f+3/L¤Н&%Ky0]pv;9 rIΗ JC|KIEuSm$2䈜'l5V<; R0Q?aWS po&c= iqQ&7yKO{L_竰9} N|!|<|v"5E$S3#!^tɸ.,~,#FZlWG[LbU|-Y[=R˟AC8z@0\; ++ӐGx$ڪGUwmLC~┪ endstream endobj 246 0 obj <> stream xk``"N?0fPAI@ȀRaȂ Rxl'edR- Zp-6 J endstream endobj 248 0 obj <> stream xmWy\ 1kF3Ѫ=U+nԥZP\Ö !d!DV @l,Kj**ZjknszNw<1̛<7C\]APn-M-JA7VO+ = vS8u~ց Dd5Ms&U94yTYg?""UQ"XM~<ZR)#0yXJ\Q J5o&DEhRb#S'nUMeL2**E=KURzJlB#]0ox-5EWj\$j#TܺIUqpR& & $&B$D;Ax/bb2Ex1& >w` \D"qe'½{AMh)HhHmx$ǹXqsgA"wxȻb(  DG_>C=Kb%6}f3Zj1- U'9,~9kYZQ$55^mSxJb h#MQzZIwCSGUWc :D7{?)@9"y -'ݨ\mb%4P'INpgC54؊_q4X/ >%)PaUy300NP,fi+xH}|fVީaa-p-Geg77n0_r?Y u+=0Cǰ\$u~T-y}lZ7Z&o%w 5V6?:vnSy8<:!]ݭLqQiv[5MU ?">5O1=vIzcp|Һ dN؁$U6>x v6jSh-(/xmEzNAl~qk08PW^i᝻%gWN3$z7J]'K#}JOm#V3xIʩ Ъi<OƳZQȧLUI0ۏBDi}-_ҷlLך.x(f  Bxn928L+%9JbY&JY*guy2>J2meSU3dK]q f\g:;G9;eHrr?HF:0!Ls^c62N,MhQunrLCkΈ5{ȑz;R,p)8fiBjN:SR\ )D%W v1\ XڜSj.i2-g$Đ7Yex?ؚ*9o%NݑTm7Z>ֲ.vVItæ3C:~SMUY2DyjF[5(:9ibVC ӢG'SPKMn>b41x\{6763 c?#1 ] nr?0Mfʊ3~[E-WX$M޿+yκ G{W,q]yq ]y]ΎZj53\.%FQaIy;k'xAH|.v?̶r{}؋0Qq:]Wېbe5I![kOY?*;x*͛pÐLl KhODzx\ۖs!a<5\} 鸣dD)͕Ӳ r rA0Yւj[j*&pC#s}^4K-}g/ӭs /WvA EwBRvx Qm ԥ>u]’x= %t*uHf'=2͝&n8~+1=N0{OwHpysE1D6/G49-;h=m:81݊⵻%x`x)w7b2W ]qÄ mˁxQhJڔ_QYS2`HfΈd:(.Omt&ԥٖk )?t̩>i_?fA*봴\9vKk{1CV?]x<QN_-uԬ2_| +V5u9ZeمQ);.YmWp>Yii iz~_*a{/ܳW(e ۀC_g:֚e7K`+dQ#$wHml1w偗#LqS (ATT]VXI;̡T)ůhC>{{8SdI۝Ԥظzu/C]D6Q[YE& KT#J6"1"K{^/zoyB{PuXU/!kE=K endstream endobj 250 0 obj <> stream xkja?W1 yJ A endstream endobj 10 0 obj <> stream x\ےGr}Wr`w/#x1WcIKZZb=#x1hɿ9U tÑbF*bgɬ*&! "hP -NZh-,V8(T 0Z"z)) %ShŸW e1#VN @[@H)/xs`͚ !{Gc “oz%O is{aTG@cV"H @ up/ 0(Dҡ}O. 
Nm ~B#WCRVЮquy{Oz]]d[7L./UpWzP GDuO2 eQgk46#~UGQy'+@H 1{}Eo[хA P,q=! }g CKS%э >w{s*m:p˳>$pLx`zyl/ n v%xY4] m>!w#!t@YR(̛!|K!Q'>=1U벘& T]jxq 5~K<(8z|Aޓx)?Jp%wP,R1=3|{E1S :IM|T_?G8Æ^I2Y0R.r=DǪ.JwbdzA2R*U833V~KWr^l׉]5;{k[3T pl`_a=Njqp{4: c)rTtU=W##9CNuPv9/c\=mCځQA|yqm=:@wqg9פs,Low_Fߍlٝ߁YIw4MM|)ʜTd:#8 s>N{/VbAqϭ*e*f5bU( g)':ہyjYi5] Kw'^p=ppyE- zUc7L,ҙk7+<Ǧun|h]5WWR£KTui_S}j˛oxxɺYn5Z6=O^CG$X_Z6y}us bZ^h` ?3@ ӷ}~~+5л6bôol;kK-3fm a\_0 ke4ﬕos*xm[w2Ia3 rhGne.u9zݛ7^]}i]YݮFpٓP"0]}Gw?7JDRJݸĘ7R~D"v )0]{k#% Y 戭}]xtT'E E)%<(JHP( %w%tJ|Msrûc}_%ev1 cwuT[j_BVĖz(,ưRGTz׼}1=T#Eb%$9璆f>rȹs)1$QGDR GTvfd1 z/SPe0 wv!(^?Ih }΢RZٴwi},} ͖hCΚZ+ JۻFRu&qm3O^>o^u3ٮO|h~o?z\]/n?v;.:5NOgW7_`7⢰ɶyQ%gy*Ef"Mړv1ϛv1_/nWmc^WdՈww<>n ;[ cf6*NwgE1cX;77y>㓽 l"!nH{5K·C{~&|R 4X\Chv_ sE1wv7eD>bGWc5|yXEkq2̀1*05 p#t`FV%9|8i_<> A 8pX1)y& ̧t9[ɏ) eJ=#4jߝ#@P'=SE ##=f"6hK39&"_U <2FadDĞa6γك $b[>{*E{8CBk2UԢ4j`R@qd %G޳DETcД LHha3$Qɞ~ ϞO}>-z+ؗ g {15K0}^ރ܍q䓅 lrfK흽5ݵ˿>cK(Oko5;67˫U^MՏEi'3,b qpM-fʒǾ&iף~Wf)j꿱U3y.zɥiQ<5!?/øO_,jgXML-_rAy3_4&ړ0m+O) h XJ|Xy9Gp#m_yG0l4CR endstream endobj 251 0 obj <> stream xڵmO@)4&P wǩE{ؤ|\b0_gP@A p.0R`h0p)p<  7prB&%$Om6Kel8)ŻÅ&.~`2M܆Si> vXN|WR"4wGv<,Pҋy "bqZsZ2_X2M.'>.NS/K0"84Ű:.bPX(-{R8+|gIsR#e;2I-p)A`x S2gd珑[e;Wfy:lɽ="4)*<|Ux./ePUt ~R )J4l@)|@P`At7(NP8|#J-PVe[$5BLsBn7d/3!H$AP׍EQS+p8p5yq˞VqQ ИAS{v} hИȭFpq9vOQ} -j$m@Mm"gF endstream endobj 252 0 obj <]/Root 1 0 R/Info 2 0 R/Size 253/W[1 3 2]/Filter/FlateDecode/Length 589>> stream x5YTNQMG Ee(SH 3d2+P(H&CUuròtoZgo6x?GcQ iW鸢 zwqp]_ux*0X$xK%,$x#؃SgzJݬbǮM%%.cJ|I*  6Qכq/G%9j 0짒Ra:*3 -sZSPPo0lH#TN>hRkfcR)2hl1*L>7B܏98A<0Ev\p9 +q)'q5|gK,|\)8\z\K1LÍ p nm8gN,܅31w܋pnǣx gq,Xpyx/y,2›x V|!>—X؈/)>WJNVue7lWRil?SoUyi5;2;ɖ& 0m&R)1\lґuQ?7 endstream endobj startxref 78481 %%EOF actuar/inst/doc/modeling.Rnw0000644000176200001440000005150315147745722015563 0ustar liggesusers\input{share/preamble} %\VignetteIndexEntry{Loss distributions modeling} %\VignettePackage{actuar} %\SweaveUTF8 \title{Loss modeling features of \pkg{actuar}} \author{Christophe Dutang \\ Université Paris 
Dauphine \\[3ex] Vincent Goulet \\ Université Laval \\[3ex] Mathieu Pigeon \\ Université du Québec à Montréal} \date{} <>= library(actuar) options(width = 52, digits = 4) @ \begin{document} \maketitle \section{Introduction} \label{sec:introduction} One important task of actuaries is the modeling of claim amount and claim count distributions for ratemaking, loss reserving or other risk evaluation purposes. Package \pkg{actuar} features many support functions for loss distributions modeling: \begin{enumerate} \item support for heavy tail continuous distributions useful in loss severity modeling; \item support for phase-type distributions for ruin theory; \item functions to compute raw moments, limited moments and the moment generating function (when it exists) of continuous distributions; \item support for zero-truncated and zero-modified extensions of the discrete distributions commonly used in loss frequency modeling; \item extensive support of grouped data; \item functions to compute empirical raw and limited moments; \item support for minimum distance estimation using three different measures; \item treatment of coverage modifications (deductibles, limits, inflation, coinsurance). \end{enumerate} Vignette \code{"distributions"} covers the points 1--4 above in great detail. This document concentrates on points 5--8. \section{Grouped data} \label{sec:grouped-data} Grouped data is data represented in an interval-frequency manner. Typically, a grouped data set will report that there were $n_j$ claims in the interval $(c_{j - 1}, c_j]$, $j = 1, \dots, r$ (with the possibility that $c_r = \infty$). This representation is much more compact than an individual data set --- where the value of each claim is known --- but it also carries far less information. Now that storage space in computers has essentially become a non issue, grouped data has somewhat fallen out of fashion. 
Still, grouped data remains useful as a means to represent data, if only graphically --- for example, a histogram is nothing but a density approximation for grouped data. Moreover, various parameter estimation techniques rely on grouped data. For these reasons, \pkg{actuar} provides facilities to store, manipulate and summarize grouped data. A standard storage method is needed since there are many ways to represent grouped data in the computer: using a list or a matrix, aligning $n_j$ with $c_{j - 1}$ or with $c_j$, omitting $c_0$ or not, etc. With appropriate extraction, replacement and summary methods, manipulation of grouped data becomes similar to that of individual data. Function \code{grouped.data} creates a grouped data object similar to --- and inheriting from --- a data frame. The function accepts two types of input: \begin{enumerate} \item a vector of group boundaries $c_0, c_1, \dots, c_r$ and one or more vectors of group frequencies $n_1, \dots, n_r$ (note that there should be one more group boundary than group frequencies); \item individual data $x_1, \dots, x_n$ and either a vector of breakpoints $c_1, \dots, c_r$, a number $r$ of breakpoints or an algorithm to determine the latter. \end{enumerate} In the second case, \code{grouped.data} will group the individual data using function \code{hist}. The function always assumes that the intervals are contiguous. 
\begin{example} \label{ex:grouped.data-1} Consider the following already grouped data set: \begin{center} \begin{tabular}{lcc} \toprule Group & Frequency (Line 1) & Frequency (Line 2) \\ \midrule $(0, 25]$ & 30 & 26 \\ $(25, 50]$ & 31 & 33 \\ $(50, 100]$ & 57 & 31 \\ $(100, 150]$ & 42 & 19 \\ $(150, 250]$ & 65 & 16 \\ $(250, 500]$ & 84 & 11 \\ \bottomrule \end{tabular} \end{center} We can conveniently and unambiguously store this data set in R as follows: <>= x <- grouped.data(Group = c(0, 25, 50, 100, 150, 250, 500), Line.1 = c(30, 31, 57, 42, 65, 84), Line.2 = c(26, 33, 31, 19, 16, 11)) @ Internally, object \code{x} is a list with class <>= class(x) @ The package provides a suitable \code{print} method to display grouped data objects in an intuitive manner: <>= x @ \qed \end{example} \begin{example} \label{ex:grouped.data-2} Consider Data Set~B of \citet[Table~11.2]{LossModels4e}: \begin{center} \begin{tabular}{*{10}{r}} 27 & 82 & 115 & 126 & 155 & 161 & 243 & 294 & 340 & 384 \\ 457 & 680 & 855 & 877 & 974 & \np{1193} & \np{1340} & \np{1884} & \np{2558} & \np{15743} \end{tabular} \end{center} We can represent this data set as grouped data using either an automatic or a suggested number of groups (see \code{?hist} for details): <>= y <- c( 27, 82, 115, 126, 155, 161, 243, 294, 340, 384, 457, 680, 855, 877, 974, 1193, 1340, 1884, 2558, 15743) grouped.data(y) grouped.data(y, breaks = 5) @ The above grouping methods use equi-spaced breaks. This is rarely appropriate for heavily skewed insurance data. For this reason, \code{grouped.data} also supports specified breakpoints (or group boundaries): <>= grouped.data(y, breaks = c(0, 100, 200, 350, 750, 1200, 2500, 5000, 16000)) @ \qed \end{example} The package supports the most common extraction and replacement methods for \code{"grouped.data"} objects using the usual \code{[} and \code{[<-} operators. In particular, the following extraction operations are supported. 
(In the following, object \code{x} is the grouped data object of \autoref{ex:grouped.data-1}.) <>= x <- grouped.data(Group = c(0, 25, 50, 100, 150, 250, 500), Line.1 = c(30, 31, 57, 42, 65, 84), Line.2 = c(26, 33, 31, 19, 16, 11)) @ \begin{enumerate}[i)] \item Extraction of the vector of group boundaries (the first column): <>= x[, 1] @ \item Extraction of the vector or matrix of group frequencies (the second and third columns): <>= x[, -1] @ \item Extraction of a subset of the whole object (first three lines): <>= x[1:3, ] @ \end{enumerate} Notice how extraction results in a simple vector or matrix if either of the group boundaries or the group frequencies are dropped. As for replacement operations, the package implements the following. \begin{enumerate}[i)] \item Replacement of one or more group frequencies: <>= x[1, 2] <- 22; x x[1, c(2, 3)] <- c(22, 19); x @ \item Replacement of the boundaries of one or more groups: <>= x[1, 1] <- c(0, 20); x x[c(3, 4), 1] <- c(55, 110, 160); x @ \end{enumerate} It is not possible to replace the boundaries and the frequencies simultaneously. The mean of grouped data is \begin{equation} \hat{\mu} = \frac{1}{n} \sum_{j = 1}^r a_j n_j, \end{equation} where $a_j = (c_{j - 1} + c_j)/2$ is the midpoint of the $j$th interval, and $n = \sum_{j = 1}^r n_j$, whereas the variance is \begin{equation} \frac{1}{n} \sum_{j = 1}^r n_j (a_j - \hat{\mu})^2. \end{equation} The standard deviation is the square root of the variance. The package defines methods to easily compute the above descriptive statistics: <>= mean(x) var(x) sd(x) @ Higher empirical moments can be computed with \code{emm}; see \autoref{sec:empirical-moments}. The R function \code{hist} splits individual data into groups and draws a histogram of the frequency distribution. The package introduces a method for already grouped data. 
Only the first frequencies column is considered (see \autoref{fig:histogram} for the resulting graph): <>= hist(x[, -3]) @ \begin{figure}[t] \centering <>= hist(x[, -3]) @ \caption{Histogram of a grouped data object} \label{fig:histogram} \end{figure} \begin{rem} One will note that for an individual data set like \code{y} of \autoref{ex:grouped.data-2}, the following two expressions yield the same result: <>= hist(y) hist(grouped.data(y)) @ \end{rem} R has a function \code{ecdf} to compute the empirical cdf $F_n(x)$ of an individual data set: \begin{equation} \label{eq:ecdf} F_n(x) = \frac{1}{n} \sum_{j = 1}^n I\{x_j \leq x\}, \end{equation} where $I\{\mathcal{A}\} = 1$ if $\mathcal{A}$ is true and $I\{\mathcal{A}\} = 0$ otherwise. The function returns a \code{"function"} object to compute the value of $F_n(x)$ in any $x$. The approximation of the empirical cdf for grouped data is called an ogive \citep{LossModels4e,HoggKlugman}. It is obtained by joining the known values of $F_n(x)$ at group boundaries with straight line segments: \begin{equation} \tilde{F}_n(x) = \begin{cases} 0, & x \leq c_0 \\ \dfrac{(c_j - x) F_n(c_{j-1}) + (x - c_{j-1}) F_n(c_j)}{% c_j - c_{j - 1}}, & c_{j-1} < x \leq c_j \\ 1, & x > c_r. \end{cases} \end{equation} The package includes a generic function \code{ogive} with methods for individual and for grouped data. The function behaves exactly like \code{ecdf}. \begin{example} \label{ex:ogive} Consider first the grouped data set of \autoref{ex:grouped.data-1}. 
Function \code{ogive} returns a function to compute the ogive $\tilde{F}_n(x)$ in any point: <>= (Fnt <- ogive(x)) @ Methods for functions \code{knots} and \code{plot} allow, respectively, to obtain the knots $c_0, c_1, \dots, c_r$ of the ogive and to draw a graph (see \autoref{fig:ogive}): <>= knots(Fnt) Fnt(knots(Fnt)) plot(Fnt) @ \begin{figure}[t] \centering <>= plot(Fnt) @ \caption{Ogive of a grouped data object} \label{fig:ogive} \end{figure} To add further symmetry between functions \code{hist} and \code{ogive}, the latter also accepts in argument a vector of individual data. It will call \code{grouped.data} and then compute the ogive. (Below, \code{y} is the individual data set of \autoref{ex:grouped.data-2}.) <>= (Fnt <- ogive(y)) knots(Fnt) @ \qed \end{example} A method of function \code{quantile} for grouped data objects returns linearly smoothed quantiles, that is, the inverse of the ogive evaluated at various points: <>= Fnt <- ogive(x) @ <>= quantile(x) Fnt(quantile(x)) @ Finally, a \code{summary} method for grouped data objects returns the quantiles and the mean, as is usual for individual data: <>= summary(x) @ \section{Data sets} \label{sec:data-sets} This is certainly not the most spectacular feature of \pkg{actuar}, but it remains useful for illustrations and examples: the package includes the individual dental claims and grouped dental claims data of \cite{LossModels4e}: <>= data("dental"); dental data("gdental"); gdental @ \section{Calculation of empirical moments} \label{sec:empirical-moments} The package provides two functions useful for estimation based on moments. First, function \code{emm} computes the $k$th empirical moment of a sample, whether in individual or grouped data form. 
For example, the following expressions compute the first three moments for individual and grouped data sets: <>= emm(dental, order = 1:3) emm(gdental, order = 1:3) @ Second, in the same spirit as \code{ecdf} and \code{ogive}, function \code{elev} returns a function to compute the empirical limited expected value --- or first limited moment --- of a sample for any limit. Again, there are methods for individual and grouped data (see \autoref{fig:elev} for the graphs): <>= lev <- elev(dental) lev(knots(lev)) plot(lev, type = "o", pch = 19) lev <- elev(gdental) lev(knots(lev)) plot(lev, type = "o", pch = 19) @ \begin{figure}[t] \centering <>= par(mfrow = c(1, 2)) plot(elev(dental), type = "o", pch = 19) plot(elev(gdental), type = "o", pch = 19) @ \caption{Empirical limited expected value function of an individual data object (left) and a grouped data object (right)} \label{fig:elev} \end{figure} \section{Minimum distance estimation} \label{sec:minimum-distance} Two methods are widely used by actuaries to fit models to data: maximum likelihood and minimum distance. The first technique applied to individual data is well covered by function \code{fitdistr} of the package \pkg{MASS} \citep{MASS}. The second technique minimizes a chosen distance function between theoretical and empirical distributions. Package \pkg{actuar} provides function \code{mde}, very similar in usage and inner working to \code{fitdistr}, to fit models according to any of the following three distance minimization methods. \begin{enumerate} \item The Cramér-von~Mises method (\code{CvM}) minimizes the squared difference between the theoretical cdf and the empirical cdf or ogive at their knots: \begin{equation} d(\theta) = \sum_{j = 1}^n w_j [F(x_j; \theta) - F_n(x_j; \theta)]^2 \end{equation} for individual data and \begin{equation} d(\theta) = \sum_{j = 1}^r w_j [F(c_j; \theta) - \tilde{F}_n(c_j; \theta)]^2 \end{equation} for grouped data. 
Here, $F(x)$ is the theoretical cdf of a parametric family, $F_n(x)$ is the empirical cdf, $\tilde{F}_n(x)$ is the ogive and $w_1 \geq 0, w_2 \geq 0, \dots$ are arbitrary weights (defaulting to $1$). \item The modified chi-square method (\code{chi-square}) applies to grouped data only and minimizes the squared difference between the expected and observed frequency within each group: \begin{equation} d(\theta) = \sum_{j = 1}^r w_j [n (F(c_j; \theta) - F(c_{j - 1}; \theta)) - n_j]^2, \end{equation} where $n = \sum_{j = 1}^r n_j$. By default, $w_j = n_j^{-1}$. \item The layer average severity method (\code{LAS}) applies to grouped data only and minimizes the squared difference between the theoretical and empirical limited expected value within each group: \begin{equation} d(\theta) = \sum_{j = 1}^r w_j [\LAS(c_{j - 1}, c_j; \theta) - \tilde{\LAS}_n(c_{j - 1}, c_j; \theta)]^2, \end{equation} where $\LAS(x, y) = \E{X \wedge y} - \E{X \wedge x}$, % $\tilde{\LAS}_n(x, y) = \tilde{E}_n[X \wedge y] - \tilde{E}_n[X \wedge x]$ and $\tilde{E}_n[X \wedge x]$ is the empirical limited expected value for grouped data. \end{enumerate} The arguments of \code{mde} are a data set, a function to compute $F(x)$ or $\E{X \wedge x}$, starting values for the optimization procedure and the name of the method to use. The empirical functions are computed with \code{ecdf}, \code{ogive} or \code{elev}. \begin{example} \label{ex:mde} The expressions below fit an exponential distribution to the grouped dental data set, as per example~2.21 of \cite{LossModels}: <>= op <- options(warn = -1) # hide warnings from mde() @ <>= mde(gdental, pexp, start = list(rate = 1/200), measure = "CvM") mde(gdental, pexp, start = list(rate = 1/200), measure = "chi-square") mde(gdental, levexp, start = list(rate = 1/200), measure = "LAS") @ <>= options(op) # restore warnings @ \qed \end{example} It should be noted that optimization is not always as simple to achieve as in \autoref{ex:mde}. 
For example, consider the problem of fitting a Pareto distribution to the same data set using the Cramér--von~Mises method: <>= mde(gdental, ppareto, start = list(shape = 3, scale = 600), measure = "CvM") @ <>= out <- try(mde(gdental, ppareto, start = list(shape = 3, scale = 600), measure = "CvM"), silent = TRUE) cat(sub(", scale", ",\n scale", out)) @ Working in the log of the parameters often solves the problem since the optimization routine can then flawlessly work with negative parameter values: <>= pparetolog <- function(x, logshape, logscale) ppareto(x, exp(logshape), exp(logscale)) (p <- mde(gdental, pparetolog, start = list(logshape = log(3), logscale = log(600)), measure = "CvM")) @ The actual estimators of the parameters are obtained with <>= exp(p$estimate) @ %$ This procedure may introduce additional bias in the estimators, though. \section{Coverage modifications} \label{sec:coverage} Let $X$ be the random variable of the actual claim amount for an insurance policy, $Y^L$ be the random variable of the amount paid per loss and $Y^P$ be the random variable of the amount paid per payment. The terminology for the last two random variables refers to whether or not the insurer knows that a loss occurred. Now, the random variables $X$, $Y^L$ and $Y^P$ will differ if any of the following coverage modifications are present for the policy: an ordinary or a franchise deductible, a limit, coinsurance or inflation adjustment \cite[see][chapter~8 for precise definitions of these terms]{LossModels4e}. \autoref{tab:coverage} summarizes the definitions of $Y^L$ and $Y^P$. 
\begin{table} \centering \begin{tabular}{lll} \toprule Coverage modification & Per-loss variable ($Y^L$) & Per-payment variable ($Y^P$)\\ \midrule Ordinary deductible ($d$) & $\begin{cases} 0, & X \leq d \\ X - d, & X > d \end{cases}$ & $\begin{cases} X - d, & X > d \end{cases}$ \medskip \\ Franchise deductible ($d$) & $\begin{cases} 0, & X \leq d \\ X, & X > d \end{cases}$ & $\begin{cases} X, & X > d \end{cases} $ \medskip \\ Limit ($u$) & $\begin{cases} X, & X \leq u \\ u, & X > u \end{cases}$ & $\begin{cases} X, & X \leq u \\ u, & X > u \end{cases}$ \bigskip \\ Coinsurance ($\alpha$) & $\alpha X$ & $\alpha X$ \medskip \\ Inflation ($r$) & $(1 + r)X$ & $(1 + r)X$ \\ \bottomrule \end{tabular} \caption{Coverage modifications for per-loss variable ($Y^L$) and per-payment variable ($Y^P$) as defined in \cite{LossModels4e}.} \label{tab:coverage} \end{table} Often, one will want to use data $Y^P_1, \dots, Y^P_n$ (or $Y^L_1, \dots, Y^L_n$) from the random variable $Y^P$ ($Y^L$) to fit a model on the unobservable random variable $X$. This requires expressing the pdf or cdf of $Y^P$ ($Y^L$) in terms of the pdf or cdf of $X$. Function \code{coverage} of \pkg{actuar} does just that: given a pdf or cdf and any combination of the coverage modifications mentioned above, \code{coverage} returns a function object to compute the pdf or cdf of the modified random variable. The function can then be used in modeling like any other \code{dfoo} or \code{pfoo} function. \begin{example} \label{ex:coverage} Let $Y^P$ represent the amount paid by an insurer for a policy with an ordinary deductible $d$ and a limit $u - d$ (or maximum covered loss of $u$). 
Then the definition of $Y^P$ is \begin{equation} Y^P = \begin{cases} X - d, & d \leq X \leq u \\ u - d, & X \geq u \end{cases} \end{equation} and its pdf is \begin{equation} \label{eq:pdf-YP} f_{Y^P}(y) = \begin{cases} 0, & y = 0 \\ \dfrac{f_X(y + d)}{1 - F_X(d)}, & 0 < y < u - d \\ \dfrac{1 - F_X(u)}{1 - F_X(d)}, & y = u - d \\ 0, & y > u - d. \end{cases} \end{equation} Assume $X$ has a gamma distribution. Then an R function to compute the pdf \eqref{eq:pdf-YP} in any $y$ for a deductible $d = 1$ and a limit $u = 10$ is obtained with \code{coverage} as follows: <>= f <- coverage(pdf = dgamma, cdf = pgamma, deductible = 1, limit = 10) f f(0, shape = 5, rate = 1) f(5, shape = 5, rate = 1) f(9, shape = 5, rate = 1) f(12, shape = 5, rate = 1) @ \qed \end{example} Note how function \code{f} in the previous example is built specifically for the coverage modifications submitted and contains as little useless code as possible. The function returned by \code{coverage} may be used for various purposes, most notably parameter estimation, as the following example illustrates. \begin{example} Let object \code{y} contain a sample of claims amounts from policies with the deductible and limit of \autoref{ex:coverage}. One can fit a gamma distribution by maximum likelihood to the claim severity distribution as follows: <>= x <- rgamma(100, 2, 0.5) y <- pmin(x[x > 1], 9) op <- options(warn = -1) # hide warnings from fitdistr() @ <>= library(MASS) fitdistr(y, f, start = list(shape = 2, rate = 0.5)) @ <>= options(op) # restore warnings @ \qed \end{example} Vignette \code{"coverage"} contains more detailed formulas for the pdf and the cdf under various combinations of coverage modifications. 
\bibliography{actuar} \end{document} %%% Local Variables: %%% mode: noweb %%% TeX-master: t %%% coding: utf-8 %%% End: actuar/inst/NEWS.2.Rd0000644000176200001440000003063515147745722013736 0ustar liggesusers\name{NEWS} \title{\pkg{actuar} News} \encoding{UTF-8} \section{LATER NEWS}{ This file covers NEWS for the 2.x series. News for \pkg{actuar} 3.0-0 and later can be found in file \file{NEWS.Rd}. } \section{CHANGES IN \pkg{actuar} VERSION 2.3-3}{ \subsection{BUG FIXES}{ \itemize{ \item{Fixed declaration of the interface to a function imported from \pkg{expint} to comply with option \code{-fno-common} that will be the default in gcc starting with version 10.0.x. Thanks to Joshua Ulrich \email{josh.m.ulrich@gmail.com}, maintainer of \pkg{xts} and \pkg{TTR} for proposing the fix.} \item{Correction of the formula for the moment of order \eqn{k} for grouped data in \code{?emm}. Thanks to Walter Garcia-Fontes for the heads up.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.3-2}{ \subsection{BUG FIXES}{ \itemize{ \item{Fixed generation of random variates for the logarithmic distribution with \code{rlogarithmic} when \eqn{p > 0.95}. Thanks to Sam Thompson \email{samuel.thompson14@imperial.ac.uk} for the report and patch.} \item{Fixed generation of random variates for the zero modified geometric distribution with \code{rzmgeom} when \eqn{p_0^M > p}{p0m > p}. Thanks to Christophe Dutang \email{dutang@ceremade.dauphine.fr} for the report.} \item{Fixed the formula for the variance of the zero truncated negative binomial distribution in the man page. Thanks to Daan Gerard Uitenbroek \email{Daanuitenbroek@ggd.amsterdam.nl} for the report.} \item{Fixed a typo in vignette \dQuote{distributions} in the formula of the survival function for zero-modified discrete distributions.} } } \subsection{USER VISIBLE CHANGES}{ \itemize{ \item{Add circular references between Pareto and Single Parameter Pareto man pages. 
There was no reference to the Single Parameter Pareto distribution in the man page for the Pareto and this generated questions from time to time on how to compute the former. The new note and 'see also' should solve this.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.3-1}{ \subsection{NEW FEATURES}{ \itemize{ \item{Vignette \dQuote{credibility} now contains an appendix summarizing the formulas in the linear Bayes cases.} } } \subsection{BUG FIXES}{ \itemize{ \item{\code{cm} with \code{formula = "bayes"} stopped in the Gamma/Gamma case even though the parameter \code{shape.lik} was provided. Thanks to Vincent Masse \email{vincent.masse.4@ulaval.ca} for the report.} \item{Component \code{weights} of the return value of \code{cm} in the \code{formula = "bayes"} case was wrong. This had no impact on premium calculation and was visible in the output of \code{summary} only. Also, it caused an error when \code{data} was \code{NULL} or missing.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.3-0}{ \subsection{NEW FEATURES}{ \itemize{ \item{\code{cm} can now fit linear Bayes models for the following combinations of likelihood and prior distributions: Poisson/Gamma, Exponential/Gamma, Bernoulli/Beta, Geometric/Beta, Normal/Normal, Gamma/Gamma, Binomial/Beta, Negative Binomial/Beta and the less common case Single Parameter Pareto/Gamma (where the Bayes estimator is linear, but not a credibility premium). 
Thanks to Christophe Dutang \email{dutang@ceremade.dauphine.fr} for the idea.} \item{\code{rcomphierarc.summaries} is now an alias for the man page of \code{simul.summaries}.} } } \subsection{USER VISIBLE CHANGES}{ \itemize{ \item{In \code{summary} results for credibility models, a level \dQuote{section title} is no longer printed for one-level models.} \item{All instances of function \code{simul} in vignette \code{\dQuote{simulation}} replaced by \code{rcomphierarc}.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.2-0}{ \subsection{NEW FEATURES}{ \itemize{ \item{Functions \code{rcompound} and \code{rcomppois} gain an argument \code{SIMPLIFY} that is \code{TRUE} by default. When \code{FALSE}, the functions return not only variates from the aggregate claim amount random variable, but also the variates from the underlying frequency and severity distributions.} \item{Functions \code{rcompound} and \code{rcomppois} now admit an object name in argument for \code{model.sev} and \code{model.freq}.} } } \subsection{BUG FIX}{ \itemize{ \item{Display of verbatim blocks in vignettes.} } } \subsection{USER VISIBLE CHANGES}{ \itemize{ \item{In the man page for \code{dgenpareto}, additional note on the link between the Generalized Pareto distribution in the package and the version used in Embrechts et al. (1997) and Wikipedia. Thanks to Marcel Trevissen \email{kamath1602@gmail.com} for the pointer.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.1-1}{ \subsection{BUG FIX}{ \itemize{ \item{Usage of \code{R_useDynamicSymbols} to preclude compilation \code{NOTE}s, better registration of native routines and reduced symbol visibility.} \item{Vignettes no longer use LaTeX package framed as it was not found on OS X in CRAN builds.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.1-0}{ \subsection{BUG FIX}{ \itemize{ \item{\code{qinvgauss} was not computing quantiles as far in the right tail as \code{statmod:::qinvgauss}. This is now fixed. 
Thanks to Gordon Smyth \email{smyth@wehi.edu.au} for pointing it out.} } } \subsection{USER VISIBLE CHANGES}{ \itemize{ \item{Support for the incomplete gamma function and the exponential integral has been moved to package \pkg{expint}. Therefore, \pkg{actuar} now imports these functionalities through the \pkg{expint} API.} \item{Consequence of the above, the non exported functions \code{gammaint} and \code{expint} are deleted from the package.} \item{Section 6 on special integrals of the \code{\dQuote{distributions}} package vignette was revised to better introduce the incomplete gamma function, the incomplete beta function and the related integrals.} } } } \section{CHANGES IN \pkg{actuar} VERSION 2.0-0}{ \subsection{NEW FEATURES}{ \itemize{ \item{New support functions \code{[dpqrm,lev,mgf]invgauss} for the inverse Gaussian distribution. The first three functions are C (read: faster) implementations of functions of the same name in package \pkg{statmod}.} \item{New support functions \code{[dpqrm,mgf]gumbel} for the Gumbel extreme value distribution.} \item{Extended range of admissible values for many limited expected value functions thanks to new C-level functions \code{expint}, \code{betaint} and \code{gammaint}. These provide special integrals presented in the introduction of Appendix A of Klugman et al. (2012); see also \code{vignette("distributions")}. Affected functions are: \code{levtrbeta}, \code{levgenpareto}, \code{levburr}, \code{levinvburr}, \code{levpareto}, \code{levinvpareto}, \code{levllogis}, \code{levparalogis}, \code{levinvparalogis} in the Transformed Beta family, and \code{levinvtrgamma}, \code{levinvgamma}, \code{levinvweibull} in the Transformed Gamma family.} \item{New functions \code{expint}, \code{betaint} and \code{gammaint} to compute the special integrals mentioned above. These are merely convenience R interfaces to the C level functions. 
They are \emph{not} exported by the package.} \item{New support functions \code{[dpqr]poisinvgauss} for the Poisson-inverse Gaussian discrete distribution.} \item{New support functions \code{[dpqr]logarithmic} and \code{[dpqr]zmlogarithmic} for the logarithmic (or log-series) and zero-modified logarithmic distributions.} \item{New support functions \code{[dpqr]ztpois} and \code{[dpqr]zmpois} for the zero-truncated and zero-modified Poisson distributions.} \item{New support functions \code{[dpqr]ztnbinom} and \code{[dpqr]zmnbinom} for the zero-truncated and zero-modified negative binomial distributions.} \item{New support functions \code{[dpqr]ztgeom} and \code{[dpqr]zmgeom} for the zero-truncated and zero-modified geometric distributions.} \item{New support functions \code{[dpqr]ztbinom} and \code{[dpqr]zmbinom} for the zero-truncated and zero-modified binomial distributions.} \item{New vignette \code{"distributions"} that reviews in great detail the continuous and discrete distributions provided in the package, along with implementation details.} \item{\code{aggregateDist} now accepts \code{"zero-truncated binomial"}, \code{"zero-truncated geometric"}, \code{"zero-truncated negative binomial"}, \code{"zero-truncated poisson"}, \code{"zero-modified binomial"}, \code{"zero-modified geometric"}, \code{"zero-modified negative binomial"}, \code{"zero-modified poisson"} and \code{"zero-modified logarithmic"} for argument \code{model.freq} with the \code{"recursive"} method.} \item{New function \code{rmixture} to generate random variates from discrete mixtures, that is from random variables with densities of the form \eqn{f(x) = p_1 f_1(x) + ... + p_n f_n(x)}.} \item{New function \code{rcompound} to generate random variates from (non hierarchical) compound models of the form \eqn{S = X_1 + \dots + X_N}. 
Function \code{simul} could already do that, but \code{rcompound} is substantially faster for non hierarchical models.} \item{New function \code{rcomppois} that is a simplified version of \code{rcompound} for the very common compound Poisson case.} \item{\code{simul} now accepts an atomic (named or not) vector for argument \code{nodes} when simulating from a non hierarchical compound model. But really, one should use \code{rcompound} for such cases.} \item{New alias \code{rcomphierarc} for \code{simul} that better fits within the usual naming scheme of random generation functions.} \item{Functions \code{grouped.data} and \code{ogive} now accept individual data in argument. The former will group the data using \code{hist} (therefore, all the algorithms to compute the number of breakpoints available in \code{hist} are also available in \code{grouped.data}). \code{ogive} will first create a grouped data object and then compute the ogive. While there is no guarantee that the two functions are backward compatible (the number and position of the arguments have changed), standard calls should not be affected.} } } \subsection{USER VISIBLE CHANGES}{ \itemize{ \item{The material on probability laws in vignette \code{"lossdist"} has been moved to the new vignette \code{"distributions"} (see the previous section).} \item{The first argument of the \code{mgffoo} functions has changed from \code{x} to \code{t}. This is a more common notation for moment generating functions.} \item{In \code{aggregateDist} with the \code{"recursive"} method, if the length of \code{p0} is greater than one, only the first element is used, with a warning.} \item{\code{aggregateDist} with the \code{"recursive"} method and \code{model.freq = "logarithmic"} now uses the new \code{dlogarithmic} family of functions. Therefore, parametrization has changed from the one of Klugman et al. (2012) to the standard parametrization for the logarithmic distribution. 
Basically, any value of \code{prob} for the logarithmic parameter in previous versions of \pkg{actuar} should now be \code{1 - prob}.} \item{The aim of vignette \code{"simulation"} is changed from \dQuote{simulation of compound hierarchical models} to \dQuote{simulation of insurance data with \pkg{actuar}} as it also covers the new functions \code{rmixture} and \code{rcompound}.} \item{Vignette \code{"lossdist"} is renamed to \code{"modeling"} and it is revised to cover the new functionalities of \code{grouped.data} and \code{ogive}.} } } \subsection{BUG FIX}{ \itemize{ \item{An old and nasty out-of-bounds bug could crash R when using the \code{"recursive"} method of \code{aggregateDist} with a frequency distribution from the \eqn{(a, b, 1)} family. The bug went unnoticed before because there was no example for the \eqn{(a, b, 1)} case in the man page.} } } \subsection{DEPRECATED}{ \itemize{ \item{Functions \code{[m,lev,mgf]invGauss} that complemented functions \code{[dpqr]invGauss} of package \pkg{SuppDists} are deprecated in favor of the new complete set of functions \code{[dpqrm,lev,mgf]invgauss}.} } } } \section{OLDER NEWS}{ News for \pkg{actuar} 1.2-2 and earlier can be found in file \file{NEWS.1.Rd}. } actuar/inst/NEWS.Rd0000644000176200001440000003407715151206331013561 0ustar liggesusers\name{NEWS} \title{\pkg{actuar} News} \encoding{UTF-8} \section{CHANGES IN \pkg{actuar} VERSION 3.3-7}{ \subsection{NEW FEATURES}{ \itemize{ \item{Appendix C of the \dQuote{distributions} package vignette now provides the formulas for the first three cumulants (hence the mean, variance and skewness) of the zero-truncated and zero-modified distributions. They are not used in the package code, but we developed the formulas at some point and opted to record them in the vignette for posterity.} } } \subsection{BUG FIXES}{ \itemize{ \item{The quantile functions for the zero-modified distributions now correctly handle the case \code{p <= p0}. 
Thanks to Philippe Leblanc for the report and pointers.} \item{The quantile functions for the zero-modified distributions now correctly handle the following corner cases: \code{p0 = 1}; limit values of the parameters with \code{lower.tail = FALSE} or \code{p0 = 0}; \code{p = 0} with \code{p0 = 0}.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.3-6}{ \subsection{NEW FEATURES}{ \itemize{ \item{\code{?ruin} gains an example of mixtures of Erlang distributions for claims and interarrival times.} } } \subsection{BUG FIXES}{ \itemize{ \item{\code{ruin} now works correctly for mixtures of Erlang distributions. Closes issue #4. Thanks also to Jorge Yslas and Paula Rocha Rodriguez \email{paula.rocharodriguez@alum.uca.es} for the report.} \item{\code{var} and \code{sd} for individual data now correctly use the value of argument \code{na.rm} (closes issue #5). Thanks to Ge Zhang for the report.} \item{\code{pinvgauss} and \code{qinvgauss} now use a gamma approximation when the coefficient of variation is very small (ported from \pkg{statmod} v1.4.28).} \item{\code{rinvgauss} now accurately handles large or infinite values for the mean or dispersion (ported from \pkg{statmod} v1.4.29).} \item{\code{qinvgauss} no longer takes an initial value outside the domain of convergence with the gamma approximation for small right tail probabilities (ported from \pkg{statmod} v1.4.30).} } } \subsection{OTHER CHANGES}{ \itemize{ \item{Replacement of a few API entry points for C API compliance introduced in R 4.5.0.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.3-5}{ \subsection{NEW FEATURES}{ \itemize{ \item{The \code{print} methods for objects of class \code{"cm"} and \code{"summary.cm"} now use the option \code{deparse.cutoff} to control the printing of the call to \code{cm}.} } } \subsection{BUG FIXES}{ \itemize{ \item{Appendix A of the \dQuote{distributions} package vignette still listed the root \code{pareto2} as an alias for \code{pareto}. 
Moreover, the root for the Pareto II distribution was wrongly listed as \code{pareto4}.}
\code{?rmixture} provides more information and examples on this matter for that function.} } } \subsection{OTHER CHANGES}{ \itemize{ \item{Package vignettes now use the STIX2 fonts for text and Fira Mono for code.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.3-1}{ \subsection{BUG FIXES}{ \itemize{ \item{Include prototypes for all C level functions to please \code{-Wstrict-prototypes}.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.3-0}{ \subsection{NEW FEATURES}{ \itemize{ \item{Italian translations contributed by Daniele Medri \email{dmedri@gmail.com}.} \item{Package help file; use \code{?actuar} to read.} \item{New entry in the CITATION file for the paper in the Journal of Statistical Software presenting our implementation of the Feller-Pareto family of distributions.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.2-2}{ \subsection{BUG FIXES}{ \itemize{ \item{Replace deprecated (as of R 4.2.0) macro DOUBLE_EPS by DBL_EPSILON in C code.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.2-1}{ \subsection{BUG FIXES}{ \itemize{ \item{Fix incorrect usage of \code{all.equal} in tests.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.2-0}{ \subsection{NEW FEATURES}{ \itemize{ \item{Generic versions of \code{var} and \code{sd} with methods for grouped data. The default methods (for individual data) call the standard functions of the \pkg{stats} package. Grouped data methods contributed by Walter Garcia-Fontes \email{walter.garcia@upf.edu}.} \item{Method of \code{summary} for grouped data objects contributed by Walter Garcia-Fontes \email{walter.garcia@upf.edu}.} \item{Examples for the new methods for grouped data objects in \code{lossdist} demonstration \R script.} } } \subsection{BUG FIXES}{ \itemize{ \item{Use \code{USE_FC_LEN_T} in the C prototypes of LAPACK functions to correspond to code produced by gfortran >= 7. 
The mechanism was introduced in \R 3.6.2 and is planned to make its use obligatory in \R 4.2.0.} \item{Miscellaneous fixes to formulas for grouped data in the documentation for \code{mean.grouped.data} and \code{emm}, as well as in the \dQuote{modeling} package vignette.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.1-4}{ \subsection{BUG FIXES}{ \itemize{ \item{Due to its use of \code{log1mexp} since the previous release, the package depends on \R >= 4.1.0.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.1-3}{ \subsection{BUG FIXES}{ \itemize{ \item{Carry over the new implementation of the Cornish-Fisher Expansion of base \R used by \code{qlogarithmic} and \code{qpoisinvgauss}.} \item{Fix computation of \code{[pq]zmpois}, \code{[pq]zmbinom} and \code{[pq]zmnbinom} following fixes to the underlying base \R functions introduced in r80271 of \R sources. With thanks to B.D. Ripley and Martin Maechler.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.1-2}{ \subsection{BUG FIXES}{ \itemize{ \item{\code{qinvgauss} now returns a finite value when \code{1.5/shape > 1000}. Thanks to Bettina Grün \email{bettina.gruen@wu.ac.at} for the fix.} \item{A protection against rounding errors now ensures that \code{qzmlogarithmic(1 - pzmlogarithmic(x), lower.tail = FALSE) == x} is always \code{TRUE}.} \item{In \code{?dburr}, the scale parameter appeared in the denominator of the density instead of \eqn{x}. Thanks to Etienne Guy for the heads up.} \item{The package tests now correctly use \code{stopifnot} with argument \code{exprs} explicitly named.} \item{The formula for the moment of order \eqn{k} for grouped data in \code{?emm} fixed in version 2.3-3 for the LaTeX version is now also fixed for the text version. 
Thanks (again) to Walter Garcia-Fontes.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.1-1}{ \subsection{BUG FIXES}{ \itemize{ \item{\code{rcompound} and \code{rmixture} now correctly find objects defined higher in the call stack.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.1-0}{ \subsection{BUG FIXES}{ \itemize{ \item{\code{rmixture} now randomly shuffles the variates by default and gains an argument \code{shuffle} (\code{TRUE} by default). Using \code{shuffle = FALSE} restores the previous behaviour where the output vector contains all the random variates from the first model, then all the random variates from the second model, and so on. When the order of the random variates is irrelevant, this cuts execution time roughly in half. Thanks to Adam Kałdus \email{akaldus@wp.pl} for the stimulating comments on this matter.} } } \subsection{USER VISIBLE CHANGES}{ \itemize{ \item{The number of variates returned by \code{rmixture} is now the length of argument \code{n} if larger than 1, like other \code{r} functions.} \item{\code{rmixture} now checks the validity of its arguments.} } } } \section{CHANGES IN \pkg{actuar} VERSION 3.0-0}{ \subsection{NEW FEATURES}{ \itemize{ \item{Support functions \code{[dpqrm,lev]fpareto} for the Feller-Pareto distribution and related Pareto distributions with a location parameter. The Feller-Pareto defines a large family of distributions encompassing the transformed beta family and many variants of the Pareto distribution. Using the nomenclature of Arnold (2015), the following distributions are now supported by \pkg{actuar}: Feller-Pareto, Pareto IV, Pareto III, and Pareto II. The Pareto I was already supported under the name Single Parameter Pareto. Contributed by Christophe Dutang, Vincent Goulet and Nicholas Langevin.} \item{The package now exposes through an API its 200+ C routines for probability functions and the beta integral. This is documented in a new section of the \dQuote{distributions} package vignette. 
See file \file{include/actuarAPI.h} in the package installation directory for the complete list of exported routines.} \item{Improvements to the accuracy in the right tail of the \code{p} and \code{lev} functions for most probability distributions of the transformed beta family. Achieved by replacing \code{pbeta(u, a, b, lower.tail)} for \eqn{u > 0.5} with \code{pbeta(1 - u, b, a, !lower.tail)} and an accurate computation of \code{u}. Contributed by Nicholas Langevin.} \item{The C workhorse \code{betaint_raw} behind \code{betaint} gains an additional argument to receive an accurate value of \eqn{1 - x}. Used extensively to improve accuracy of the \code{lev} functions for the transformed beta family. Contributed by Nicholas Langevin.} \item{The \dQuote{distributions} package vignette now regroups distributions of the transformed beta families and the single parameter Pareto under the umbrella of the Feller-Pareto family of distributions. The vignette now also includes diagrams showing the interrelations between the members of this family, as well as between the members of the transformed gamma and inverse transformed gamma families.} \item{Exhaustive regression tests for probability functions.} } } \subsection{BUG FIXES}{ \itemize{ \item{Improvements to the simulation algorithm for zero-modified discrete distributions in the \eqn{p_0^M < p_0}{p0m < p0} case. Contributed by Nicholas Langevin.} \item{\code{dpoisinvgauss} no longer returns \code{NaN} for large values of \code{x}. Solved by computing probabilities recursively instead of by calling \code{bessel_k} (the latter would overflow for large \code{nu} and propagate \code{NaN}). 
Computations are actually about twice as fast.} \item{\code{ppoisinvgauss} now honors argument \code{lower_tail}.} \item{\code{qpoisinvgauss} no longer fails with \code{mu = Inf} and \code{log.p = TRUE}.} \item{\code{betaint(x, Inf, b)} now returns \code{Inf} instead of \code{NaN}.} \item{\code{betaint(.Machine$double.xmin, a, b)}, with \eqn{b < 0}, now returns 0 instead of \code{NaN}.} \item{\code{d} and \code{p} functions for all continuous size distributions now handle limiting cases for infinite scale parameter, or for zero non-scale parameters, consistently with functions of base \R. Affected functions are: \code{[dp]trbeta}, \code{[dp]burr}, \code{[dp]llogis}, \code{[dp]paralogis}, \code{[dp]genpareto}, \code{[dp]pareto}, \code{[dp]invburr}, \code{[dp]invpareto}, \code{[dp]invparalogis} in the Transformed Beta family; \code{[dp]trgamma}, \code{[dp]invtrgamma}, \code{[dp]invgamma}, \code{[dp]invweibull}, \code{[dp]invexp} in the Transformed Gamma family; \code{[dp]lgamma}, \code{[dp]gumbel}, \code{[dp]invgauss}, \code{[dp]genbeta}.} \item{\code{levinvexp} no longer returns \code{NaN} for finite order.} } } \subsection{BREAKING CHANGE}{ \itemize{ \item{Support for the Pareto II distributions comes from functions \code{[dpqrm,lev]pareto2}. These functions were \emph{aliases} to \code{[dpqrm,lev]pareto} in previous version of \pkg{actuar}. The new functions are \emph{not} backward compatible. 
Therefore, calls to the \code{*pareto2} functions of previous versions of \pkg{actuar} will return wrong results and should be replaced by calls to \code{*pareto} functions.} } } \subsection{DEFUNCT}{ \itemize{ \item{Functions \code{[m,lev,mgf]invGauss} that were deprecated in version 2.0-0.} } } } \note{ \itemize{Older news can be found in files \file{NEWS.2.Rd} (2.x series), \file{NEWS.1.Rd} (1.x series) and \file{NEWS.0.Rd} (0.x series).} } actuar/inst/NEWS.1.Rd0000644000176200001440000002436715147745722013742 0ustar liggesusers\name{NEWS} \title{actuar News} \encoding{UTF-8} \section{LATER NEWS}{ This file covers NEWS for the 1.x series. News for \pkg{actuar} 2.0-0 and later can be found in file \file{NEWS.Rd}. } \section{CHANGES IN VERSION 1.2-2}{ \subsection{BUG FIX}{ \itemize{ \item dpareto() did not handle the case x == 0 correctly. } } } \section{CHANGES IN VERSION 1.2-1}{ \subsection{(MORE OR LESS) USER-VISIBLE CHANGES}{ \itemize{ \item The package now depends on R >= 3.3.0 since it uses chkDots() in a few methods that do not use the content of their '...' argument. \item ogive() lost its argument '...' as it was unused anyway. \item severity.portfolio() calls unroll() directly instead of relying on the default method to be identical to unroll(). } } \subsection{BUG FIXES}{ \itemize{ \item Deleted an unwanted debugging message ("local") printed by CTE() at every execution. \item predict.cm() and summary.cm() now treat the '...' argument as advertised in the help file. \item Fixed bad examples in a few probability law help files that returned unintended results such as Inf or NaN. } } \subsection{MAINTENANCE}{ \itemize{ \item C-level function log1pexp(...) used in a few places in lieu of log1p(exp(...)). \item Names of the internal utility macros defined in dpq.h changed from "R_<...>" to "ACT_<...>" to make it clearer that they are defined by the package (although they were copied from R sources). 
} } } \section{CHANGES IN VERSION 1.2-0}{ \subsection{NEW FEATURE}{ \itemize{ \item In the computation of the CTE in the Normal Power case, numerical integration has been replaced by the explicit formula given in Castañer, A.; Claramunt, M.M.; Mármol, M. (2013). Tail value at risk. An analysis with the Normal-Power approximation. In Statistical and Soft Computing Approaches in Insurance Problems. Nova Science Publishers. ISBN 978-1-62618-506-7. } } } \section{CHANGES IN VERSION 1.1-10}{ \subsection{BUG FIX}{ \itemize{ \item Results of 'cm' for hierarchical models would get incorrectly sorted when there were 10 nodes or more at a given level. Thanks to Dylan Wienke \email{dwienke2@gmail.com} for the catch. } } } \section{CHANGES IN VERSION 1.1-9}{ \subsection{MAINTENANCE}{ \itemize{ \item Functions 'head' and 'tail' explicitly imported from package utils in NAMESPACE as per a new requirement of R 3.3.x. } } } \section{CHANGES IN VERSION 1.1-8}{ \subsection{BUG FIXES}{ \itemize{ \item Memory allocation problem at the C level in hierarc(). Thanks to Prof. Ripley for identification of the problem and help solving it. \item Abusive use of abs() at the C level in a few places. } } } \section{CHANGES IN VERSION 1.1-7}{ \subsection{BUG FIX}{ \itemize{ \item panjer() result was wrong for the "logarithmic" type of frequency distribution. Thanks to \email{mmclaramunt@ub.edu} for the catch. } } } \section{CHANGES IN VERSION 1.1-6}{ \subsection{BUG FIX}{ \itemize{ \item Fixed a deprecated use of real(). } } } \section{CHANGES IN VERSION 1.1-5}{ \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item Complete rewrite of coverage(); the function it creates no longer relies on ifelse() and, consequently, is much faster. The rewrite was motivated by a change in the way [dp]gamma() handle their arguments in R 2.15.1. } } \subsection{BUG FIX}{ \itemize{ \item summary.ogive() no longer relies on length 'n' to be in the environment of a function created by approxfun(). 
Fix required by R >= 2.16.0. } } } \section{CHANGES IN VERSION 1.1-4}{ \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item The function resulting from elev() for individual data is now faster for a large number of limits. (Thanks to Frank Zhan \email{FrankZhan@donegalgroup.com} for the catch and report.) } } } \section{CHANGES IN VERSION 1.1-3}{ \subsection{BUG FIX}{ \itemize{ \item Resolved symbol clash at C level tickled by package GeneralizedHyperbolic on Solaris. \item Wrong result given by levinvGauss() because the upper tail of the normal distribution was used in the calculation instead of the lower tail. Thanks to Dan Murphy \email{chiefmurphy@gmail.com} for the heads up. } } } \section{CHANGES IN VERSION 1.1-2}{ \subsection{BUG FIX}{ \itemize{ \item \code{discretize()} would return wrong results when argument \code{step} was omitted in favor of \code{by} \emph{and} the discretization method \code{unbiased} was used. (Thanks to Marie-Pier Côté \email{marie-pier.cote.11@ulaval.ca} for the catch.) } } } \section{CHANGES IN VERSION 1.1-1}{ \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item CITATION file updated. } } \subsection{BUG FIX}{ \itemize{ \item \code{summary.cm()} could skip records in the output thinking they were duplicates. } } } \section{CHANGES IN VERSION 1.1-0}{ \subsection{NEW FEATURES}{ \itemize{ \item New argument \code{convolve} in \code{aggregateDist()} to convolve the distribution obtained with the recursive method a number of times with itself. This is used for large portfolios where the expected number of claims is so large that recursions cannot start. Dividing the frequency parameter by \eqn{2^n} and convolving \eqn{n} times can solve the problem. \item New method of \code{diff()} for \code{"aggregateDist"} objects to return the probability mass function at the knots of the aggregate distribution. Valid (and defined) for \code{"recursive"}, \code{"exact"} and \code{"simulation"} methods only. 
\item Since the terminology Tail Value-at-Risk is often used instead of Conditional Tail Expectation, \code{TVaR()} is now an alias for \code{CTE()}. } } \subsection{BUG FIXES}{ \itemize{ \item Quantiles (and thus VaRs and CTEs) for \code{"aggregateDist"} objects where off by one knot of the distribution. \item \code{cm()} returned the internal classification codes instead of the original ones for hierarchical models. (Thanks to Zachary Martin for the heads up.) } } } \section{CHANGES IN VERSION 1.0-2}{ \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item Functions \code{m()} and \code{lev()} now return \code{Inf} instead of \code{NaN} for infinite moments. (Thanks to David Humke for the idea.) } } \subsection{BUG FIXES}{ \itemize{ \item Non-ascii characters in one R source file prevented compilation of the package in a C locale (at least on OS X). \item For probability laws that have a strictly positive mode or a mode at zero depending on the value of one or more shape parameters, \code{d(0, ...)} did not handle correctly the case exactly at the boundary condition. (Thanks to Stephen L \email{bulls22eye@gmail.com} for the catch.) } } } \section{CHANGES IN VERSION 1.0-1}{ \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item \code{levinvpareto()} works for \code{order > -shape} and defaults to \code{order = 1}, like all other \code{lev()} functions. } } \subsection{BUG FIXES}{ \itemize{ \item Functions \code{d()} handle the case \code{x == 0} correctly. \item Functions \code{q()} return \code{NaN} instead of an error when argument \code{p} is outside \eqn{[0, 1]} (as in R). \item Functions \code{r()} for three parameter distributions (e.g. Burr) no longer wrongly display the \code{"NaNs produced"} warning message. \item The warning message \code{"NaNs produced"} was not (and could not be) translated. \item Function \code{levinvpareto()} computes limited moments for \code{order > -shape} using numerical integration. 
} } } \section{CHANGES IN VERSION 1.0-0}{ \subsection{NEW FEATURES}{ \itemize{ \item Improved support for regression credibility models. There is now an option to make the computations with the intercept at the barycenter of time. This assures that the credibility adjusted regression line (or plane, or ...) lies between the individual and collective ones. In addition, contracts without data are now supported like in other credibility models. \item Argument \code{right} for \code{grouped.data()} to allow intervals closed on the right (default) or on the left. \item Method of \code{quantile()} for grouped data objects to compute the inverse of the ogive. } } \subsection{USER-VISIBLE CHANGES}{ \itemize{ \item \code{cm()} no longer returns the values of the unbiased estimators when \code{method = "iterative"}. \item Specification of regression models in \code{cm()} has changed: one should now provide the regression model as a formula and the regressors in a separate matrix or data frame. \item Due to above change, \code{predict.cm()} now expects \code{newdata} to be a data frame as for \code{stats:::predict.lm()}. } } \subsection{DEFUNCT}{ \itemize{ \item Function \code{bstraub()} is no longer exported. Users are expected to use \code{cm()} as interface instead. } } \subsection{BUG FIXES}{ \itemize{ \item Functions \code{r()} are now more consistent in warning when \code{NA}s (specifically \code{NaN}s) are generated (as per the change in R 2.7.0). \item \code{frequency.portfolio()} was wrongly counting \code{NA}s. \item Domain of pdfs returned by \code{aggregateDist()} now restricted to \eqn{[0, 1]}. \item Quantiles are now computed correctly (and more efficiently) in 0 and 1 by \code{quantile.aggregateDist()}. \item \code{coverage()} no longer requires a cdf when it is not needed, namely when there is no deductible and no limit. } } } \section{OLDER NEWS}{ News for \pkg{actuar} 0.9.7 and earlier can be found in file \file{NEWS.0.Rd}. 
} actuar/build/0000755000176200001440000000000015151412457012635 5ustar liggesusersactuar/build/vignette.rds0000644000176200001440000000063315151412457015176 0ustar liggesusersRN0ute+ \)]yʍB4vqyѼ!B(Bq'BQ1\]8pz(Fx$1\rwȒQ qS (K,b`}[`jی[N%MJ?6WVC6Zs}d[s S3;]}_-eUU+ޓť3 3K7g0-lkPYksrǫ|9?QTWέq/!RV/U[Gȶ;:w 1"8"@mM6"H/HU;'klmruАKH(4^; I@xa <48 IgIQ4@>XLzs#sP : ³϶>*~CsQn[X11C0[6 Z^^f VmM㋚[ߛjU{]hg _u>(y?;d}@\VTY6zV ݒd =WiGfeAZS-Twǂ$s^zb>U8`㾚М4Nڝq$D5q+oxbc8y-mW+ôjY Lv2y(Iؕaw$pTݪ2k .,ws2\x IESgփeinEUjT-ilw $6T2)UgbEz&W-K@]1)dONN,V%4F2˔[X{gU.G/f!5kd8b·ʞl)S6K}MuMeCże *szߓ#5-mld`_PLg-{,kzf:s 6M)+]=M)k1ɐ,Cc`E鄌#RPM̳^|4}\6k}rvZ @avq(t|!y`\sh C1Y.Iv;f#e\})D`6-^Ōeq&"J22 nlcl 'I)A2l>~y7cu'Zy8@ άθA% S Z`FS*q]  n+42˭Iap ɶ+{#Mozwj{zrٌpN8q Wh/л.`޽ן=6< |x0BMށ|'^=_ꫵșZPW.F3{N*U7D6*2h 96 wH:{ZY&Nmۦ\e#.޲ ]M|XA{$|JjJ5"4|hm+q4aBtL ǀ#P,(93 L5C PGJdԟGL3iO= Zw ;:hu(H}'8Gfoϩo7.\ G@`>&ldM =VV%iuT-ۨnjziR̨j=)m6Fk`0w6i:2B~^rtsVF z@OJMBĄk["N&dU"Å&↜[aɞM^C²̀;ܓzfdyXu޺b. ᪚]ERjfHwJv 2#q͙Oy#fHeuz68$ii|nf0zǴzZҊ.eטpP_uKӶ$wa'!7<]TL7p̻Q5!(ms6nElޛCF@2)^8{OFkoXvYñkh!Iivjjpc&K!?>| S PF+jd7/ ]^,|hإ3>ELzxOpnDq+t1s?=g戁]@/6s#3S]IpmF!I܇[0RI,eI%OK}kss %J1JfyriD|{wjɝ_0 9ۯ3RhµAyȑr>T}\@覚ss­, |1n\D#ެi__ +<9OᗇϹfpܵ'۽'ϲE;v ^$#-NݦNw!]G`axKM߇^|5A!75ȑ&R?A^#o@7th*Ap9[E2 <|nR_ t$ KAR2&p r=b{J$} #X^L%C(9 GA@Hː#=d0*H#HLrr{5~lKCt5 'Pj> /BR <| 7Jkih#f6&xI89ɔ"% %$(cwwMhkB楁Nfw`$`r;0pr+;ɀ+k=켈u/@O[QIxSIb3d'%e4-Ox{A;ξO_6VLc`i8 9$ؗ`)7Ir[2%XJx rs%n@8|-"9%?9!LmPqT4e?ei$ mpqJ1^nBG* gy >^R8/wŝ\w@Ev;N:rb'uPMSioG‹ݰ|$l oGnX_ +#M8-g:zrWr&}_A/ޜn5#D4ٕ&[qh&u}aoФxr$>ؐk[0mSyz69iUIz-H!1) CtGKLTRfgG.jPqa W-?!'PgБI/!G:bRRҡCk.p]p0u $_&!@EIg$=߰>VJjDx0p1_y:|J@>6Mg!'ҦI]pr,XCCNof;0]SY9'’tŌ`akI^N&xM\x!ׁ#,E5,G*aFx=a?"Wx#f<veؗ5oK{ J¶nZPvLvesu4ݴ(S`K,;uyJ//S{Pn?gXYHބ퀽m _C4G!6Ɖq#)e.hs n+cH}; TШ"XDTeor6Fj< 9R/*t!q$~ AJ-3Ax1LiI%q$n=X6!ieث}'bKRCI$DE ԖEE @x9(ڳA{?T*¨J%ltƵR9o~g=-jh ($PBWj{dM-DwY->Zʗp8jjEtf~8+ԕӝ*(- 4TΪZJj|̀ʥ#ғz%WB9?BWGXcA=s|zr«JG֙+ȟ @j\uY^8 u<V_Yul־dxQ?E CWp;܀ֶG+g9~dC.eRl:$'֍^T/' VmTį+'!OFҕ~ThS/sJYV`⽌p.l/5ٲRϻ0S+p:d}9@7*˙}>`oZSuFZxݣmj*5G_1\ y*Cs=XKk 
OZ6SpjèwX5>o^o] +ov6v?L%ek[&n=_mo=9ڇ5Wyy0YźV&;]~dk2֞l*l!NQ3ցzio|_3Sgyp"KY#|2wwvK 3zeɶ3짤gv2Y\yB/Zdޓť3 3KD==lڍYGXՀf:_@]x+~zGRQ~ۘ}|%h>O "^]uےgl a60(.i4fI!6h[Oc${pihj;i^t%f3^X@o6hton53OEgӞmWL=bM 58< pw.nAJb,ŵJ2ܕ6HA^]'3Z7k X A Ӱ$H-zW+XL W,MxL@W%+gO ?!/Ƈ/OU8k&-xNŔp蛎xd ͝7q aTt̻o=8 8ioactuar/man/0000755000176200001440000000000015151412457012311 5ustar liggesusersactuar/man/Pareto3.Rd0000644000176200001440000001063115147745722014127 0ustar liggesusers\name{Pareto3} \alias{Pareto3} \alias{dpareto3} \alias{ppareto3} \alias{qpareto3} \alias{rpareto3} \alias{mpareto3} \alias{levpareto3} \title{The Pareto III Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Pareto III distribution with parameters \code{min}, \code{shape} and \code{scale}. } \usage{ dpareto3(x, min, shape, rate = 1, scale = 1/rate, log = FALSE) ppareto3(q, min, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qpareto3(p, min, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rpareto3(n, min, shape, rate = 1, scale = 1/rate) mpareto3(order, min, shape, rate = 1, scale = 1/rate) levpareto3(limit, min, shape, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{min}{lower bound of the support of the distribution.} \item{shape, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Pareto III (or \dQuote{type III}) distribution with parameters \code{min} \eqn{= \mu}{= m}, \code{shape} \eqn{= \gamma}{= b} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\gamma ((x - \mu)/\theta)^{\gamma - 1}}{% \theta [1 + ((x - \mu)/\theta)^\gamma]^2}}{% f(x) = (b ((x - m)/s)^(b - 1))/(s [1 + ((x - m)/s)^b]^2)} for \eqn{x > \mu}{x > m}, \eqn{-\infty < \mu < \infty}{-Inf < m < Inf}, \eqn{\gamma > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. The Pareto III is the distribution of the random variable \deqn{\mu + \theta \left(\frac{X}{1 - X}\right)^{1/\gamma},}{% m + s (X/(1 - X))^(1/b),} where \eqn{X} has a uniform distribution on \eqn{(0, 1)}. It derives from the \link[=dfpareto]{Feller-Pareto} distribution with \eqn{\alpha = \tau = 1}{shape1 = shape3 = 1}. Setting \eqn{\mu = 0}{min = 0} yields the \link[=dllogis]{loglogistic} distribution. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} for nonnegative integer values of \eqn{k < \gamma}{k < shape}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} for nonnegative integer values of \eqn{k} and \eqn{1 - j/\gamma}{1 - j/shape}, \eqn{j = 1, \dots, k} not a negative integer. } \value{ \code{dpareto3} gives the density, \code{ppareto3} gives the distribution function, \code{qpareto3} gives the quantile function, \code{rpareto3} generates random deviates, \code{mpareto3} gives the \eqn{k}th raw moment, and \code{levpareto3} gives the \eqn{k}th moment of the limited loss variable. 
Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levpareto3} computes the limited expected value using \code{\link{betaint}}. For Pareto distributions, we use the classification of Arnold (2015) with the parametrization of Klugman et al. (2012). The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Arnold, B.C. (2015), \emph{Pareto Distributions}, Second Edition, CRC Press. Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dllogis}} for the loglogistic distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ exp(dpareto3(1, min = 10, 3, 4, log = TRUE)) p <- (1:10)/10 ppareto3(qpareto3(p, min = 10, 2, 3), min = 10, 2, 3) ## mean mpareto3(1, min = 10, 2, 3) ## case with 1 - order/shape > 0 levpareto3(20, min = 10, 2, 3, order = 1) ## case with 1 - order/shape < 0 levpareto3(20, min = 10, 2/3, 3, order = 1) } \keyword{distribution} actuar/man/GammaSupp.Rd0000644000176200001440000000357415147745722014514 0ustar liggesusers\name{GammaSupp} \alias{GammaSupp} \alias{mgamma} \alias{levgamma} \alias{mgfgamma} \title{Moments and Moment Generating Function of the Gamma Distribution} \description{ Raw moments, limited moments and moment generating function for the Gamma distribution with parameters \code{shape} and \code{scale}. 
} \usage{ mgamma(order, shape, rate = 1, scale = 1/rate) levgamma(limit, shape, rate = 1, scale = 1/rate, order = 1) mgfgamma(t, shape, rate = 1, scale = 1/rate, log = FALSE) } \arguments{ \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{rate}{an alternative way to specify the scale.} \item{shape, scale}{shape and scale parameters. Must be strictly positive.} \item{t}{numeric vector.} \item{log}{logical; if \code{TRUE}, the cumulant generating function is returned.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} and the moment generating function is \eqn{E[e^{tX}]}, \eqn{k > -\alpha}{k > -shape}. } \value{ \code{mgamma} gives the \eqn{k}th raw moment, \code{levgamma} gives the \eqn{k}th moment of the limited loss variable, and \code{mgfgamma} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link[stats]{GammaDist}} } \references{ Johnson, N. L. and Kotz, S. (1970), \emph{Continuous Univariate Distributions, Volume 1}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Christophe Dutang and Mathieu Pigeon } \examples{ mgamma(2, 3, 4) - mgamma(1, 3, 4)^2 levgamma(10, 3, 4, order = 2) mgfgamma(1,3,2) } \keyword{distribution} actuar/man/ZeroTruncatedPoisson.Rd0000644000176200001440000000735715147745722016761 0ustar liggesusers\name{ZeroTruncatedPoisson} \alias{ZeroTruncatedPoisson} \alias{ZTPoisson} \alias{dztpois} \alias{pztpois} \alias{qztpois} \alias{rztpois} \title{The Zero-Truncated Poisson Distribution} \description{ Density function, distribution function, quantile function, random generation for the Zero-Truncated Poisson distribution with parameter \code{lambda}. } \usage{ dztpois(x, lambda, log = FALSE) pztpois(q, lambda, lower.tail = TRUE, log.p = FALSE) qztpois(p, lambda, lower.tail = TRUE, log.p = FALSE) rztpois(n, lambda) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of values to return.} \item{lambda}{vector of (non negative) means.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-truncated Poisson distribution has probability mass function \deqn{% p(x) = \frac{e^{-\lambda} \lambda^x}{x! (1 - e^{-\lambda})} = \frac{\lambda^x}{x! (e^{\lambda} - 1)}}{% p(x) = lambda^x exp(-lambda)/[x! (1 - exp(-lambda))] = lambda^x/[x! (e^lambda - 1)]} for \eqn{x = 1, 2, ...}, and \eqn{p(1) = 1} when \eqn{\lambda = 0}. The cumulative distribution function is \deqn{P(x) = \frac{F(x) - F(0)}{1 - F(0)},}{% P(x) = [F(x) - F(0)]/[1 - F(0)],} where \eqn{F(x)} is the distribution function of the standard Poisson.
The mean is \eqn{\lambda/(1 - e^{-\lambda})}{\lambda/(1 - exp(-\lambda))} and the variance is \eqn{\lambda[1 - (\lambda+1)e^{-\lambda}]/(1 - e^{-\lambda})^2}{% \lambda[1 - (\lambda+1)exp(-\lambda)]/(1 - exp(-\lambda))^2}. In the terminology of Klugman et al. (2012), the zero-truncated Poisson is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = 0} and \eqn{b = \lambda}. If an element of \code{x} is not integer, the result of \code{dztpois} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dztpois} gives the (log) probability mass function, \code{pztpois} gives the (log) distribution function, \code{qztpois} gives the quantile function, and \code{rztpois} generates random deviates. Invalid \code{lambda} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rztpois}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}ztpois} use \code{\{d,p,q\}pois} for all but the trivial input values and \eqn{p(0)}. \code{rztpois} uses the simple inversion algorithm suggested by Peter Dalgaard on the r-help mailing list on 1 May 2005 % (\url{https://stat.ethz.ch/pipermail/r-help/2005-May/070680.html}). } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dpois}} for the standard Poisson distribution. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ dztpois(1:5, lambda = 1) dpois(1:5, lambda = 1)/ppois(0, 1, lower = FALSE) # same pztpois(1, lambda = 0) # point mass at 1 qztpois(pztpois(1:10, 1), 1) x <- seq(0, 8) plot(x, dztpois(x, 2), type = "h", lwd = 2, ylab = "p(x)", main = "Zero-Truncated Poisson(2) and Poisson(2) PDF") points(x, dpois(x, 2), pch = 19, col = "red") legend("topright", c("ZT Poisson probabilities", "Poisson probabilities"), col = c("black", "red"), lty = c(1, 0), lwd = 2, pch = c(NA, 19)) } \keyword{distribution} actuar/man/ZeroTruncatedGeometric.Rd0000644000176200001440000000662215147745722017247 0ustar liggesusers\name{ZeroTruncatedGeometric} \alias{ZeroTruncatedGeometric} \alias{ZTGeometric} \alias{dztgeom} \alias{pztgeom} \alias{qztgeom} \alias{rztgeom} \title{The Zero-Truncated Geometric Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Truncated Geometric distribution with parameter \code{prob}. } \usage{ dztgeom(x, prob, log = FALSE) pztgeom(q, prob, lower.tail = TRUE, log.p = FALSE) qztgeom(p, prob, lower.tail = TRUE, log.p = FALSE) rztgeom(n, prob) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{prob}{parameter. \code{0 < prob <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-truncated geometric distribution with \code{prob} \eqn{= p} has probability mass function \deqn{% p(x) = p (1-p)^{x - 1}}{% p(x) = p (1-p)^(x-1)} for \eqn{x = 1, 2, \ldots} and \eqn{0 < p < 1}, and \eqn{p(1) = 1} when \eqn{p = 1}. 
The cumulative distribution function is \deqn{P(x) = \frac{F(x) - F(0)}{1 - F(0)},}{% P(x) = [F(x) - F(0)]/[1 - F(0)],} where \eqn{F(x)} is the distribution function of the standard geometric. The mean is \eqn{1/p} and the variance is \eqn{(1-p)/p^2}. In the terminology of Klugman et al. (2012), the zero-truncated geometric is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = 1-p} and \eqn{b = 0}. If an element of \code{x} is not integer, the result of \code{dztgeom} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dztgeom} gives the (log) probability mass function, \code{pztgeom} gives the (log) distribution function, \code{qztgeom} gives the quantile function, and \code{rztgeom} generates random deviates. Invalid \code{prob} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rztgeom}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}ztgeom} use \code{\{d,p,q\}geom} for all but the trivial input values and \eqn{p(0)}. \code{rztgeom} uses the simple inversion algorithm suggested by Peter Dalgaard on the r-help mailing list on 1 May 2005 % (\url{https://stat.ethz.ch/pipermail/r-help/2005-May/070680.html}). } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dgeom}} for the geometric distribution. \code{\link{dztnbinom}} for the zero-truncated negative binomial, of which the zero-truncated geometric is a special case. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ p <- 1/(1 + 0.5) dztgeom(c(1, 2, 3), prob = p) dgeom(c(1, 2, 3), p)/pgeom(0, p, lower = FALSE) # same dgeom(c(1, 2, 3) - 1, p) # same pztgeom(1, prob = 1) # point mass at 1 qztgeom(pztgeom(1:10, 0.3), 0.3) } \keyword{distribution} actuar/man/hist.grouped.data.Rd0000644000176200001440000000740515147745722016142 0ustar liggesusers\name{hist.grouped.data} \alias{hist.grouped.data} \title{Histogram for Grouped Data} \description{ This method for the generic function \code{\link{hist}} is mainly useful to plot the histogram of grouped data. If \code{plot = FALSE}, the resulting object of class \code{"histogram"} is returned for compatibility with \code{\link{hist.default}}, but does not contain much information not already in \code{x}. } \usage{ \method{hist}{grouped.data}(x, freq = NULL, probability = !freq, density = NULL, angle = 45, col = NULL, border = NULL, main = paste("Histogram of" , xname), xlim = range(x), ylim = NULL, xlab = xname, ylab, axes = TRUE, plot = TRUE, labels = FALSE, \dots) } \arguments{ \item{x}{an object of class \code{"grouped.data"}; only the first column of frequencies is used.} \item{freq}{logical; if \code{TRUE}, the histogram graphic is a representation of frequencies, the \code{counts} component of the result; if \code{FALSE}, probability densities, component \code{density}, are plotted (so that the histogram has a total area of one). Defaults to \code{TRUE} \emph{iff} group boundaries are equidistant (and \code{probability} is not specified).} \item{probability}{an \emph{alias} for \code{!freq}, for S compatibility.} \item{density}{the density of shading lines, in lines per inch. The default value of \code{NULL} means that no shading lines are drawn. 
Non-positive values of \code{density} also inhibit the drawing of shading lines.} \item{angle}{the slope of shading lines, given as an angle in degrees (counter-clockwise).} \item{col}{a colour to be used to fill the bars. The default of \code{NULL} yields unfilled bars.} \item{border}{the color of the border around the bars. The default is to use the standard foreground color.} \item{main, xlab, ylab}{these arguments to \code{title} have useful defaults here.} \item{xlim, ylim}{the range of x and y values with sensible defaults. Note that \code{xlim} is \emph{not} used to define the histogram (breaks), but only for plotting (when \code{plot = TRUE}).} \item{axes}{logical. If \code{TRUE} (default), axes are drawn if the plot is drawn.} \item{plot}{logical. If \code{TRUE} (default), a histogram is plotted, otherwise a list of breaks and counts is returned.} \item{labels}{logical or character. Additionally draw labels on top of bars, if not \code{FALSE}; see \code{\link{plot.histogram}}.} \item{\dots}{further graphical parameters passed to \code{\link{plot.histogram}} and thence to \code{\link{title}} and \code{\link{axis}} (if \code{plot=TRUE}).} } \value{ An object of class \code{"histogram"} which is a list with components: \item{breaks}{the \eqn{r + 1} group boundaries.} \item{counts}{\eqn{r} integers; the frequency within each group.} \item{density}{the relative frequencies within each group \eqn{n_j/n}{n[j]/n}, where \eqn{n_j}{n[j]} = \code{counts[j]}.} \item{intensities}{same as \code{density}. Deprecated, but retained for compatibility.} \item{mids}{the \eqn{r} group midpoints.} \item{xname}{a character string with the actual \code{x} argument name.} \item{equidist}{logical, indicating if the distances between \code{breaks} are all the same.} } \note{ The resulting value does \emph{not} depend on the values of the arguments \code{freq} (or \code{probability}) or \code{plot}. This is intentionally different from S.
} \seealso{ \code{\link{hist}} and \code{\link{hist.default}} for histograms of individual data and fancy examples. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \examples{ data(gdental) hist(gdental) } \keyword{dplot} \keyword{hplot} \keyword{distribution} actuar/man/InverseExponential.Rd0000644000176200001440000000545315147745722016442 0ustar liggesusers\name{InverseExponential} \alias{InverseExponential} \alias{dinvexp} \alias{pinvexp} \alias{qinvexp} \alias{rinvexp} \alias{minvexp} \alias{levinvexp} \title{The Inverse Exponential Distribution} \description{ Density function, distribution function, quantile function, random generation raw moments and limited moments for the Inverse Exponential distribution with parameter \code{scale}. } \usage{ dinvexp(x, rate = 1, scale = 1/rate, log = FALSE) pinvexp(q, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qinvexp(p, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rinvexp(n, rate = 1, scale = 1/rate) minvexp(order, rate = 1, scale = 1/rate) levinvexp(limit, rate = 1, scale = 1/rate, order) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{scale}{parameter. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The inverse exponential distribution with parameter \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\theta e^{-\theta/x}}{x^2}}{f(x) = s exp(-s/x)/x^2} for \eqn{x > 0} and \eqn{\theta > 0}{s > 0}. 
The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{k < 1}, and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, all \eqn{k}. } \value{ \code{dinvexp} gives the density, \code{pinvexp} gives the distribution function, \code{qinvexp} gives the quantile function, \code{rinvexp} generates random deviates, \code{minvexp} gives the \eqn{k}th raw moment, and \code{levinvexp} calculates the \eqn{k}th limited moment. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levinvexp} computes the limited expected value using \code{gammainc} from package \pkg{expint}. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvexp(2, 2, log = TRUE)) p <- (1:10)/10 pinvexp(qinvexp(p, 2), 2) minvexp(0.5, 2) } \keyword{distribution} actuar/man/Burr.Rd0000644000176200001440000001047315147745722013530 0ustar liggesusers\name{Burr} \alias{Burr} \alias{dburr} \alias{pburr} \alias{qburr} \alias{rburr} \alias{mburr} \alias{levburr} \title{The Burr Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Burr distribution with parameters \code{shape1}, \code{shape2} and \code{scale}. 
} \usage{ dburr(x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) pburr(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qburr(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rburr(n, shape1, shape2, rate = 1, scale = 1/rate) mburr(order, shape1, shape2, rate = 1, scale = 1/rate) levburr(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Burr distribution with parameters \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \gamma}{= b} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\alpha \gamma (x/\theta)^\gamma}{% x [1 + (x/\theta)^\gamma]^{\alpha + 1}}}{% f(x) = (a b (x/s)^b)/(x [1 + (x/s)^b]^(a + 1))} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0}, \eqn{\gamma > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. The Burr is the distribution of the random variable \deqn{\theta \left(\frac{X}{1 - X}\right)^{1/\gamma},}{% s (X/(1 - X))^(1/b),} where \eqn{X} has a beta distribution with parameters \eqn{1} and \eqn{\alpha}{a}. The Burr distribution has the following special cases: \itemize{ \item A \link[=dllogis]{Loglogistic} distribution when \code{shape1 == 1}; \item A \link[=dparalogis]{Paralogistic} distribution when \code{shape2 == shape1}; \item A \link[=dpareto]{Pareto} distribution when \code{shape2 == 1}. 
} The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{-\gamma < k < \alpha\gamma}{-shape2 < k < shape1 * shape2}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\gamma}{k > -shape2} and \eqn{\alpha - k/\gamma}{shape1 - k/shape2} not a negative integer. } \value{ \code{dburr} gives the density, \code{pburr} gives the distribution function, \code{qburr} gives the quantile function, \code{rburr} generates random deviates, \code{mburr} gives the \eqn{k}th raw moment, and \code{levburr} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levburr} computes the limited expected value using \code{\link{betaint}}. Distribution also known as the Burr Type XII or Singh-Maddala distribution. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dpareto4}} for an equivalent distribution with a location parameter. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dburr(1, 2, 3, log = TRUE)) p <- (1:10)/10 pburr(qburr(p, 2, 3, 2), 2, 3, 2) ## variance mburr(2, 2, 3, 1) - mburr(1, 2, 3, 1) ^ 2 ## case with shape1 - order/shape2 > 0 levburr(10, 2, 3, 1, order = 2) ## case with shape1 - order/shape2 < 0 levburr(10, 1.5, 0.5, 1, order = 2) } \keyword{distribution} actuar/man/InverseWeibull.Rd0000644000176200001440000000713315147745722015554 0ustar liggesusers\name{InverseWeibull} \alias{InverseWeibull} \alias{dinvweibull} \alias{pinvweibull} \alias{qinvweibull} \alias{rinvweibull} \alias{minvweibull} \alias{levinvweibull} \alias{dlgompertz} \alias{plgompertz} \alias{qlgompertz} \alias{rlgompertz} \alias{mlgompertz} \alias{levlgompertz} \title{The Inverse Weibull Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Inverse Weibull distribution with parameters \code{shape} and \code{scale}. } \usage{ dinvweibull(x, shape, rate = 1, scale = 1/rate, log = FALSE) pinvweibull(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qinvweibull(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rinvweibull(n, shape, rate = 1, scale = 1/rate) minvweibull(order, shape, rate = 1, scale = 1/rate) levinvweibull(limit, shape, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The inverse Weibull distribution with parameters \code{shape} \eqn{= \tau}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\tau (\theta/x)^\tau e^{-(\theta/x)^\tau}}{x}}{% f(x) = a (s/x)^a exp(-(s/x)^a)/x} for \eqn{x > 0}, \eqn{\tau > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. The special case \code{shape == 1} is an \link[=dinvexp]{Inverse Exponential} distribution. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{k < \tau}{k < shape}, and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, all \eqn{k}. } \value{ \code{dinvweibull} gives the density, \code{pinvweibull} gives the distribution function, \code{qinvweibull} gives the quantile function, \code{rinvweibull} generates random deviates, \code{minvweibull} gives the \eqn{k}th raw moment, and \code{levinvweibull} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levinvweibull} computes the limited expected value using \code{gammainc} from package \pkg{expint}. Distribution also known as the log-Gompertz. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S.
A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvweibull(2, 3, 4, log = TRUE)) p <- (1:10)/10 pinvweibull(qinvweibull(p, 2, 3), 2, 3) mlgompertz(-1, 3, 3) levinvweibull(10, 2, 3, order = 1) } \keyword{distribution} actuar/man/PhaseType.Rd0000644000176200001440000000746415147745722014526 0ustar liggesusers\name{PhaseType} \alias{PhaseType} \alias{dphtype} \alias{pphtype} \alias{rphtype} \alias{mphtype} \alias{mgfphtype} \title{The Phase-type Distribution} \description{ Density, distribution function, random generation, raw moments and moment generating function for the (continuous) Phase-type distribution with parameters \code{prob} and \code{rates}. } \usage{ dphtype(x, prob, rates, log = FALSE) pphtype(q, prob, rates, lower.tail = TRUE, log.p = FALSE) rphtype(n, prob, rates) mphtype(order, prob, rates) mgfphtype(t, prob, rates, log = FALSE) } \arguments{ \item{x, q}{vector of quantiles.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{prob}{vector of initial probabilities for each of the transient states of the underlying Markov chain. 
The initial probability of the absorbing state is \code{1 - sum(prob)}.} \item{rates}{square matrix of the rates of transition among the states of the underlying Markov chain.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{t}{numeric vector.} } \details{ The phase-type distribution with parameters \code{prob} \eqn{= \pi}{= pi} and \code{rates} \eqn{= \boldsymbol{T}}{= T} has density:% \deqn{f(x) = \pi e^{\boldsymbol{T} x} \boldsymbol{t}}{% f(x) = pi \%*\% exp(T * x) \%*\% t}% for \eqn{x \ge 0} and \eqn{f(0) = 1 - \pi \boldsymbol{e}}{f(0) = 1 - pi \%*\% e}, where % \eqn{\boldsymbol{e}}{e} % is a column vector with all components equal to one, % \eqn{\boldsymbol{t} = -\boldsymbol{T} \boldsymbol{e}}{% t = -T \%*\% e} % is the exit rates vector and % \eqn{e^{\boldsymbol{T}x}}{exp(T * x)} % denotes the matrix exponential of \eqn{\boldsymbol{T}x}{T * x}. The matrix exponential of a matrix \eqn{\boldsymbol{M}}{M} is defined as the Taylor series% \deqn{e^{\boldsymbol{M}} = \sum_{n = 0}^{\infty} \frac{\boldsymbol{M}^n}{n!}.}{% exp(M) = sum(n = 0:Inf; (M^n)/(n!)).} The parameters of the distribution must satisfy \eqn{\pi \boldsymbol{e} \leq 1}{pi \%*\% e <= 1}, \eqn{\boldsymbol{T}_{ii} < 0}{T[i, i] < 0}, \eqn{\boldsymbol{T}_{ij} \geq 0}{T[i, j] >= 0} and \eqn{\boldsymbol{T} \boldsymbol{e} \leq 0}{T \%*\% e <= 0}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the moment generating function is \eqn{E[e^{tX}]}. } \value{ \code{dphtype} gives the density, \code{pphtype} gives the distribution function, \code{rphtype} generates random deviates, \code{mphtype} gives the \eqn{k}th raw moment, and \code{mgfphtype} gives the moment generating function in \code{t}.
Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ \url{https://en.wikipedia.org/wiki/Phase-type_distribution} Neuts, M. F. (1981), \emph{Generating random variates from a distribution of phase type}, WSC '81: Proceedings of the 13th conference on Winter simulation, IEEE Press. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Christophe Dutang } \examples{ ## Erlang(3, 2) distribution T <- cbind(c(-2, 0, 0), c(2, -2, 0), c(0, 2, -2)) pi <- c(1,0,0) x <- 0:10 dphtype(x, pi, T) # density dgamma(x, 3, 2) # same pphtype(x, pi, T) # cdf pgamma(x, 3, 2) # same rphtype(10, pi, T) # random values mphtype(1, pi, T) # expected value curve(mgfphtype(x, pi, T), from = -10, to = 1) } \keyword{distribution} actuar/man/ZeroModifiedGeometric.Rd0000644000176200001440000000766015147745722017041 0ustar liggesusers\name{ZeroModifiedGeometric} \alias{ZeroModifiedGeometric} \alias{Zmgeometric} \alias{dzmgeom} \alias{pzmgeom} \alias{qzmgeom} \alias{rzmgeom} \title{The Zero-Modified Geometric Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Modified Geometric distribution with parameter \code{prob} and arbitrary probability at zero \code{p0}. } \usage{ dzmgeom(x, prob, p0, log = FALSE) pzmgeom(q, prob, p0, lower.tail = TRUE, log.p = FALSE) qzmgeom(p, prob, p0, lower.tail = TRUE, log.p = FALSE) rzmgeom(n, prob, p0) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{prob}{parameter. \code{0 < prob <= 1}.} \item{p0}{probability mass at zero. 
\code{0 <= p0 <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-modified geometric distribution with \code{prob} \eqn{= p} and \code{p0} \eqn{= p_0}{= p0} is a discrete mixture between a degenerate distribution at zero and a (standard) geometric. The probability mass function is \eqn{p(0) = p_0}{p(0) = p0} and \deqn{% p(x) = \frac{(1-p_0)}{(1-p)} f(x)}{% p(x) = (1-p0)/(1-p) f(x)} for \eqn{x = 1, 2, \ldots}, \eqn{0 < p < 1} and \eqn{0 \le p_0 \le 1}{0 \le p0 \le 1}, where \eqn{f(x)} is the probability mass function of the geometric. The cumulative distribution function is \deqn{P(x) = p_0 + (1 - p_0) \left(\frac{F(x) - F(0)}{1 - F(0)}\right)}{% P(x) = p0 + (1 - p0) [F(x) - F(0)]/[1 - F(0)].} The mean is \eqn{(1-p_0) \mu}{(1-p0)m} and the variance is \eqn{(1-p_0) \sigma^2 + p_0(1-p_0) \mu^2}{(1-p0)v + p0(1-p0)m^2}, where \eqn{\mu}{m} and \eqn{\sigma^2}{v} are the mean and variance of the zero-truncated geometric. In the terminology of Klugman et al. (2012), the zero-modified geometric is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = 1-p} and \eqn{b = 0}. The special case \code{p0 == 0} is the zero-truncated geometric. If an element of \code{x} is not integer, the result of \code{dzmgeom} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dzmgeom} gives the (log) probability mass function, \code{pzmgeom} gives the (log) distribution function, \code{qzmgeom} gives the quantile function, and \code{rzmgeom} generates random deviates. Invalid \code{prob} or \code{p0} will result in return value \code{NaN}, with a warning. 
The length of the result is determined by \code{n} for \code{rzmgeom}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}zmgeom} use \code{\{d,p,q\}geom} for all but the trivial input values and \eqn{p(0)}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dgeom}} for the geometric distribution. \code{\link{dztgeom}} for the zero-truncated geometric distribution. \code{\link{dzmnbinom}} for the zero-modified negative binomial, of which the zero-modified geometric is a special case. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ p <- 1/(1 + 0.5) dzmgeom(1:5, prob = p, p0 = 0.6) (1-0.6) * dgeom(1:5, p)/pgeom(0, p, lower = FALSE) # same ## simple relation between survival functions pzmgeom(0:5, p, p0 = 0.2, lower = FALSE) (1-0.2) * pgeom(0:5, p, lower = FALSE)/pgeom(0, p, lower = FALSE) # same qzmgeom(pzmgeom(0:10, 0.3, p0 = 0.6), 0.3, p0 = 0.6) } \keyword{distribution} actuar/man/emm.Rd0000644000176200001440000000361215147745722013371 0ustar liggesusers\name{emm} \alias{emm} \alias{emm.default} \alias{emm.grouped.data} \title{Empirical Moments} \description{ Raw empirical moments for individual and grouped data. } \usage{ emm(x, order = 1, \dots) \method{emm}{default}(x, order = 1, \dots) \method{emm}{grouped.data}(x, order = 1, \dots) } \arguments{ \item{x}{a vector or matrix of individual data, or an object of class \code{"grouped data"}.} \item{order}{order of the moment. Must be positive.} \item{\dots}{further arguments passed to or from other methods.} } \details{ Arguments \code{\dots} are passed to \code{\link{colMeans}}; \code{na.rm = TRUE} may be useful for individual data with missing values. For individual data, the \eqn{k}th empirical moment is \eqn{\sum_{j = 1}^n x_j^k}{sum(j; x[j]^k)}. 
For grouped data with group boundaries \eqn{c_0, c_1, \dots, c_r}{c[0], c[1], \dots, c[r]} and group frequencies \eqn{n_1, \dots, n_r}{n[1], \dots, n[r]}, the \eqn{k}th empirical moment is \deqn{\frac{1}{n} \sum_{j = 1}^r \frac{n_j (c_j^{k + 1} - c_{j - 1}^{k + 1})}{% (k + 1) (c_j - c_{j - 1})},}{% (1/n) * sum(j; (n[j] * {c[j]^(k+1) - c[j-1]^(k+1)})/% ((k+1) * {c[j] - c[j-1]})),} where \eqn{n = \sum_{j = 1}^r n_j}{n = sum(j; n[j])}. } \value{ A named vector or matrix of moments. } \seealso{ \code{\link{mean}} and \code{\link{mean.grouped.data}} for simpler access to the first moment. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ ## Individual data data(dental) emm(dental, order = 1:3) ## Grouped data data(gdental) emm(gdental) x <- grouped.data(cj = gdental[, 1], nj1 = sample(1:100, nrow(gdental)), nj2 = sample(1:100, nrow(gdental))) emm(x) # same as mean(x) } \keyword{univar} actuar/man/rcomphierarc.Rd0000644000176200001440000001451315147745722015273 0ustar liggesusers\name{rcomphierarc} \alias{rcomphierarc} \alias{simul} \alias{print.portfolio} \title{Simulation from Compound Hierarchical Models} \description{ Simulate data for insurance applications allowing hierarchical structures and separate models for the frequency and severity of claims distributions. \code{rcomphierarc} is an alias for \code{simul}. } \usage{ rcomphierarc(nodes, model.freq = NULL, model.sev = NULL, weights = NULL) \method{print}{portfolio}(x, \dots) } \arguments{ \item{nodes}{a vector or a named list giving the number of "nodes" at each level in the hierarchy of the portfolio. 
The nodes are listed from top (portfolio) to bottom (usually the years of experience).} \item{model.freq}{a named vector of expressions specifying the frequency of claims model (see Details); if \code{NULL}, only claim amounts are simulated.} \item{model.sev}{a named vector of expressions specifying the severity of claims model (see Details); if \code{NULL}, only claim numbers are simulated.} \item{weights}{a vector of weights.} \item{x}{a \code{portfolio} object.} \item{\dots}{potential further arguments required by generic.} } \details{ The order and the names of the elements in \code{nodes}, \code{model.freq} and \code{model.sev} must match. At least one of \code{model.freq} and \code{model.sev} must be non \code{NULL}. \code{nodes} may be a basic vector, named or not, for non hierarchical models. The rule above still applies, so \code{model.freq} and \code{model.sev} should not be named if \code{nodes} is not. However, for non hierarchical models, \code{\link{rcompound}} is faster and has a simpler interface. \code{nodes} specifies the hierarchical layout of the portfolio. Each element of the list is a vector of the number of nodes at a given level. Vectors are recycled as necessary. \code{model.freq} and \code{model.sev} specify the simulation models for claim numbers and claim amounts, respectively. A model is expressed in a semi-symbolic fashion using an object of mode \code{\link[base]{expression}}. Each element of the object must be named and should be a complete call to a random number generation function, with the number of variates omitted. Hierarchical (or mixtures of) models are achieved by replacing one or more parameters of a distribution at a given level by any combination of the names of the levels above. If no mixing is to take place at a level, the model for this level can be \code{NULL}. The argument of the random number generation functions for the number of variates to simulate \strong{must} be named \code{n}. 
Weights will be used wherever the name \code{"weights"} appears in a model. It is the user's responsibility to ensure that the length of \code{weights} will match the number of nodes when weights are to be used. Normally, there should be one weight per node at the lowest level of the model. Data is generated in lexicographic order, that is by row in the output matrix. } \value{ An object of \code{\link[base]{class}} \code{"portfolio"}. A \code{print} method for this class displays the models used in the simulation as well as the frequency of claims for each year and entity in the portfolio. An object of class \code{"portfolio"} is a list containing the following components: \item{data}{a two dimension list where each element is a vector of claim amounts;} \item{weights}{the vector of weights given in argument reshaped as a matrix matching element \code{data}, or \code{NULL};} \item{classification}{a matrix of integers where each row is a unique set of subscripts identifying an entity in the portfolio (e.g. integers \eqn{i}, \eqn{j} and \eqn{k} for data \eqn{X_{ijkt}}{X[ijkt]});} \item{nodes}{the \code{nodes} argument, appropriately recycled;} \item{model.freq}{the frequency model as given in argument;} \item{model.sev}{the severity model as given in argument.} It is recommended to manipulate objects of class \code{"portfolio"} by means of the corresponding methods of functions \code{aggregate}, \code{frequency} and \code{severity}. } \references{ Goulet, V. and Pouliot, L.-P. (2008), Simulation of compound hierarchical models in R, \emph{North American Actuarial Journal} \bold{12}, 401--412. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Sébastien Auclair and Louis-Philippe Pouliot } \seealso{ \code{\link{rcomphierarc.summaries}} for the functions to create the matrices of aggregate claim amounts, frequencies and individual claim amounts. 
\code{\link{rcompound}} for a simpler and much faster way to generate variates from standard, non hierarchical, compound models. } \examples{ ## Two level (contracts and years) portfolio with frequency model ## Nit|Theta_i ~ Poisson(Theta_i), Theta_i ~ Gamma(2, 3) and severity ## model X ~ Lognormal(5, 1) rcomphierarc(nodes = list(contract = 10, year = 5), model.freq = expression(contract = rgamma(2, 3), year = rpois(contract)), model.sev = expression(contract = NULL, year = rlnorm(5, 1))) ## Model with weights and mixtures for both frequency and severity ## models nodes <- list(entity = 8, year = c(5, 4, 4, 5, 3, 5, 4, 5)) mf <- expression(entity = rgamma(2, 3), year = rpois(weights * entity)) ms <- expression(entity = rnorm(5, 1), year = rlnorm(entity, 1)) wit <- sample(2:10, 35, replace = TRUE) pf <- rcomphierarc(nodes, mf, ms, wit) pf # print method weights(pf) # extraction of weights aggregate(pf)[, -1]/weights(pf)[, -1] # ratios ## Four level hierarchical model for frequency only nodes <- list(sector = 3, unit = c(3, 4), employer = c(3, 4, 3, 4, 2, 3, 4), year = 5) mf <- expression(sector = rexp(1), unit = rexp(sector), employer = rgamma(unit, 1), year = rpois(employer)) pf <- rcomphierarc(nodes, mf, NULL) pf # print method aggregate(pf) # aggregate claim amounts frequency(pf) # frequencies severity(pf) # individual claim amounts ## Standard, non hierarchical, compound model with simplified ## syntax (function rcompound() is much faster for such cases) rcomphierarc(10, model.freq = expression(rpois(2)), model.sev = expression(rgamma(2, 3))) } \keyword{datagen} actuar/man/ogive.Rd0000644000176200001440000000635115147745722013727 0ustar liggesusers\name{ogive} \alias{ogive} \alias{ogive.default} \alias{ogive.grouped.data} \alias{print.ogive} \alias{summary.ogive} \alias{knots.ogive} \alias{plot.ogive} \title{Ogive for Grouped Data} \description{ Compute a smoothed empirical distribution function for grouped data. 
} \usage{ ogive(x, \dots) \method{ogive}{default}(x, y = NULL, breaks = "Sturges", nclass = NULL, \dots) \method{ogive}{grouped.data}(x, \dots) \method{print}{ogive}(x, digits = getOption("digits") - 2, \dots) \method{summary}{ogive}(object, \dots) \method{knots}{ogive}(Fn, \dots) \method{plot}{ogive}(x, main = NULL, xlab = "x", ylab = "F(x)", \dots) } \arguments{ \item{x}{for the generic and all but the default method, an object of class \code{"grouped.data"}; for the default method, a vector of individual data if \code{y} is \code{NULL}, a vector of group boundaries otherwise.} \item{y}{a vector of group frequencies.} \item{breaks, nclass}{arguments passed to \code{\link{grouped.data}}; used only for individual data (when \code{y} is \code{NULL}).} \item{digits}{number of significant digits to use, see \code{\link{print}}.} \item{Fn, object}{an \R object inheriting from \code{"ogive"}.} \item{main}{main title.} \item{xlab, ylab}{labels of x and y axis.} \item{\dots}{arguments to be passed to subsequent methods.} } \details{ The ogive is a linear interpolation of the empirical cumulative distribution function. The equation of the ogive is \deqn{G_n(x) = \frac{(c_j - x) F_n(c_{j - 1}) + (x - c_{j - 1}) F_n(c_j)}{c_j - c_{j - 1}}}{% Gn(x) = 1/(c[j] - c[j-1]) * [(c[j] - x) Fn(c[j-1]) + (x - c[j-1]) Fn(c[j])]} for \eqn{c_{j-1} < x \leq c_j}{c[j-1] < x <= c[j]} and where \eqn{c_0, \dots, c_r}{c[0], \dots, c[r]} are the \eqn{r + 1} group boundaries and \eqn{F_n}{Fn} is the empirical distribution function of the sample. } \value{ For \code{ogive}, a function of class \code{"ogive"}, inheriting from the \code{"\link{function}"} class. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. 
} \seealso{ \code{\link{grouped.data}} to create grouped data objects; \code{\link{quantile.grouped.data}} for the inverse function; \code{\link{approxfun}}, which is used to compute the ogive; \code{\link{stepfun}} for related documentation (even though the ogive is not a step function). } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ ## Most common usage: create ogive from grouped data object. Fn <- ogive(gdental) Fn summary(Fn) knots(Fn) # the group boundaries Fn(knots(Fn)) # true values of the empirical cdf Fn(c(80, 200, 2000)) # linear interpolations plot(Fn) # graphical representation ## Alternative 1: create ogive directly from individual data ## without first creating a grouped data object. ogive(dental) # automatic class boundaries ogive(dental, breaks = c(0, 50, 200, 500, 1500, 2000)) ## Alternative 2: create ogive from set of group boundaries and ## group frequencies. cj <- c(0, 25, 50, 100, 250, 500, 1000) nj <- c(30, 31, 57, 42, 45, 10) ogive(cj, nj) } \keyword{dplot} \keyword{hplot} actuar/man/mde.Rd0000644000176200001440000000713215147745722013361 0ustar liggesusers\name{mde} \alias{Mde} \alias{mde} \title{Minimum Distance Estimation} \description{ Minimum distance fitting of univariate distributions, allowing parameters to be held fixed if desired. } \usage{ mde(x, fun, start, measure = c("CvM", "chi-square", "LAS"), weights = NULL, ...) 
} \arguments{ \item{x}{a vector or an object of class \code{"grouped data"} (in which case only the first column of frequencies is used).} \item{fun}{function returning a cumulative distribution (for \code{measure = "CvM"} and \code{measure = "chi-square"}) or a limited expected value (for \code{measure = "LAS"}) evaluated at its first argument.} \item{start}{a named list giving the parameters to be optimized with initial values} \item{measure}{either \code{"CvM"} for the Cramer-von Mises method, \code{"chi-square"} for the modified chi-square method, or \code{"LAS"} for the layer average severity method.} \item{weights}{weights; see Details.} \item{\dots}{Additional parameters, either for \code{fun} or for \code{optim}. In particular, it can be used to specify bounds via \code{lower} or \code{upper} or both. If arguments of \code{fun} are included they will be held fixed.} } \details{ The Cramer-von Mises method (\code{"CvM"}) minimizes the squared difference between the theoretical cdf and the empirical cdf at the data points (for individual data) or the ogive at the knots (for grouped data). The modified chi-square method (\code{"chi-square"}) minimizes the modified chi-square statistic for grouped data, that is the squared difference between the expected and observed frequency within each group. The layer average severity method (\code{"LAS"}) minimizes the squared difference between the theoretical and empirical limited expected value within each group for grouped data. All sum of squares can be weighted. If arguments \code{weights} is missing, weights default to 1 for \code{measure = "CvM"} and \code{measure = "LAS"}; for \code{measure = "chi-square"}, weights default to \eqn{1/n_j}{1/n[j]}, where \eqn{n_j}{n[j]} is the frequency in group \eqn{j = 1, \dots, r}. Optimization is performed using \code{\link{optim}}. 
For one-dimensional problems the Nelder-Mead method is used and for multi-dimensional problems the BFGS method, unless arguments named \code{lower} or \code{upper} are supplied when \code{L-BFGS-B} is used or \code{method} is supplied explicitly. } \value{ An object of class \code{"mde"}, a list with two components: \item{estimate}{the parameter estimates, and} \item{distance}{the distance.} } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ ## Individual data example data(dental) mde(dental, pexp, start = list(rate = 1/200), measure = "CvM") ## Example 2.21 of Klugman et al. (1998) data(gdental) mde(gdental, pexp, start = list(rate = 1/200), measure = "CvM") mde(gdental, pexp, start = list(rate = 1/200), measure = "chi-square") mde(gdental, levexp, start = list(rate = 1/200), measure = "LAS") ## Two-parameter distribution example try(mde(gdental, ppareto, start = list(shape = 3, scale = 600), measure = "CvM")) # no convergence ## Working in log scale often solves the problem pparetolog <- function(x, shape, scale) ppareto(x, exp(shape), exp(scale)) ( p <- mde(gdental, pparetolog, start = list(shape = log(3), scale = log(600)), measure = "CvM") ) exp(p$estimate) } \keyword{distribution} \keyword{htest} actuar/man/elev.Rd0000644000176200001440000000473015147745722013550 0ustar liggesusers\name{elev} \alias{elev} \alias{elev.default} \alias{elev.grouped.data} \alias{print.elev} \alias{summary.elev} \alias{knots.elev} \alias{plot.elev} \title{Empirical Limited Expected Value} \description{ Compute the empirical limited expected value for individual or grouped data. } \usage{ elev(x, ...) 
\method{elev}{default}(x, \dots) \method{elev}{grouped.data}(x, \dots) \method{print}{elev}(x, digits = getOption("digits") - 2, \dots) \method{summary}{elev}(object, \dots) \method{knots}{elev}(Fn, \dots) \method{plot}{elev}(x, \dots, main = NULL, xlab = "x", ylab = "Empirical LEV") } \arguments{ \item{x}{a vector or an object of class \code{"grouped.data"} (in which case only the first column of frequencies is used); for the methods, an object of class \code{"elev"}, typically.} \item{digits}{number of significant digits to use, see \code{\link{print}}.} \item{Fn, object}{an \R object inheriting from \code{"elev"}.} \item{main}{main title.} \item{xlab, ylab}{labels of x and y axis.} \item{\dots}{arguments to be passed to subsequent methods.} } \details{ The limited expected value (LEV) at \eqn{u} of a random variable \eqn{X} is \eqn{E[X \wedge u] = E[\min(X, u)]}{E[X ^ u] = E[min(X, u)]}. For individual data \eqn{x_1, \dots, x_n}{x[1], \dots, x[n]}, the empirical LEV \eqn{E_n[X \wedge u]}{En[X ^ u]} is thus \deqn{E_n[X \wedge u] = \frac{1}{n} \left( \sum_{x_j < u} x_j + \sum_{x_j \geq u} u \right).}{% En[X ^ u] = (sum(x[j] < u; x[j]) + sum(x[j] >= u; u))/n.} Methods of \code{elev} exist for individual data or for grouped data created with \code{\link{grouped.data}}. The formula in this case is too long to show here. See the reference for details. } \value{ For \code{elev}, a function of class \code{"elev"}, inheriting from the \code{"\link{function}"} class. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \seealso{ \code{\link{grouped.data}} to create grouped data objects; \code{\link{stepfun}} for related documentation (even though the empirical LEV is not a step function). 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ data(gdental) lev <- elev(gdental) lev summary(lev) knots(lev) # the group boundaries lev(knots(lev)) # empirical lev at boundaries lev(c(80, 200, 2000)) # and at other limits plot(lev, type = "o", pch = 16) } \keyword{dplot} \keyword{hplot} actuar/man/Pareto4.Rd0000644000176200001440000001162515147745722014134 0ustar liggesusers\name{Pareto4} \alias{Pareto4} \alias{dpareto4} \alias{ppareto4} \alias{qpareto4} \alias{rpareto4} \alias{mpareto4} \alias{levpareto4} \title{The Pareto IV Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Pareto IV distribution with parameters \code{min}, \code{shape1}, \code{shape2} and \code{scale}. } \usage{ dpareto4(x, min, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) ppareto4(q, min, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qpareto4(p, min, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rpareto4(n, min, shape1, shape2, rate = 1, scale = 1/rate) mpareto4(order, min, shape1, shape2, rate = 1, scale = 1/rate) levpareto4(limit, min, shape1, shape2, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{min}{lower bound of the support of the distribution.} \item{shape1, shape2, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Pareto IV (or \dQuote{type IV}) distribution with parameters \code{min} \eqn{= \mu}{= m}, \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \gamma}{= b} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\alpha \gamma ((x - \mu)/\theta)^{\gamma - 1}}{% \theta [1 + ((x - \mu)/\theta)^\gamma]^{\alpha + 1}}}{% f(x) = (a b ((x - m)/s)^(b - 1))/(s [1 + ((x - m)/s)^b]^(a + 1))} for \eqn{x > \mu}{x > m}, \eqn{-\infty < \mu < \infty}{-Inf < m < Inf}, \eqn{\alpha > 0}{a > 0}, \eqn{\gamma > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. The Pareto IV is the distribution of the random variable \deqn{\mu + \theta \left(\frac{X}{1 - X}\right)^{1/\gamma},}{% m + s (X/(1 - X))^(1/b),} where \eqn{X} has a beta distribution with parameters \eqn{1} and \eqn{\alpha}{a}. It derives from the \link[=dfpareto]{Feller-Pareto} distribution with \eqn{\tau = 1}{shape3 = 1}. Setting \eqn{\mu = 0}{min = 0} yields the \link[=dburr]{Burr} distribution. The Pareto IV distribution also has the following direct special cases: \itemize{ \item A \link[=dpareto3]{Pareto III} distribution when \code{shape1 == 1}; \item A \link[=dpareto2]{Pareto II} distribution when \code{shape2 == 1}. } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} for nonnegative integer values of \eqn{k < \alpha\gamma}{k < shape1 * shape2}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} for nonnegative integer values of \eqn{k} and \eqn{\alpha - j/\gamma}{shape1 - j/shape2}, \eqn{j = 1, \dots, k} not a negative integer. 
} \value{ \code{dpareto4} gives the density, \code{ppareto4} gives the distribution function, \code{qpareto4} gives the quantile function, \code{rpareto4} generates random deviates, \code{mpareto4} gives the \eqn{k}th raw moment, and \code{levpareto4} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levpareto4} computes the limited expected value using \code{\link{betaint}}. For Pareto distributions, we use the classification of Arnold (2015) with the parametrization of Klugman et al. (2012). The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Arnold, B.C. (2015), \emph{Pareto Distributions}, Second Edition, CRC Press. Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dburr}} for the Burr distribution. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ exp(dpareto4(1, min = 10, 2, 3, log = TRUE)) p <- (1:10)/10 ppareto4(qpareto4(p, min = 10, 2, 3, 2), min = 10, 2, 3, 2) ## variance mpareto4(2, min = 10, 2, 3, 1) - mpareto4(1, min = 10, 2, 3, 1) ^ 2 ## case with shape1 - order/shape2 > 0 levpareto4(10, min = 10, 2, 3, 1, order = 2) ## case with shape1 - order/shape2 < 0 levpareto4(10, min = 10, 1.5, 0.5, 1, order = 2) } \keyword{distribution} actuar/man/ZeroTruncatedBinomial.Rd0000644000176200001440000000770215147745722017063 0ustar liggesusers\name{ZeroTruncatedBinomial} \alias{ZeroTruncatedBinomial} \alias{ZTBinomial} \alias{dztbinom} \alias{pztbinom} \alias{qztbinom} \alias{rztbinom} \title{The Zero-Truncated Binomial Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Truncated Binomial distribution with parameters \code{size} and \code{prob}. } \usage{ dztbinom(x, size, prob, log = FALSE) pztbinom(q, size, prob, lower.tail = TRUE, log.p = FALSE) qztbinom(p, size, prob, lower.tail = TRUE, log.p = FALSE) rztbinom(n, size, prob) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{size}{number of trials (strictly positive integer).} \item{prob}{probability of success on each trial. 
\code{0 <= prob <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-truncated binomial distribution with \code{size} \eqn{= n} and \code{prob} \eqn{= p} has probability mass function \deqn{% p(x) = {n \choose x} \frac{p^x (1 - p)^{n-x}}{1 - (1 - p)^n}}{% p(x) = choose(n, x) [p^x (1-p)^(n-x)]/[1 - (1-p)^n]} for \eqn{x = 1, \ldots, n} and \eqn{0 < p \le 1}, and \eqn{p(1) = 1} when \eqn{p = 0}. The cumulative distribution function is \deqn{P(x) = \frac{F(x) - F(0)}{1 - F(0)},}{% P(x) = [F(x) - F(0)]/[1 - F(0)],} where \eqn{F(x)} is the distribution function of the standard binomial. The mean is \eqn{np/(1 - (1-p)^n)} and the variance is \eqn{np[(1-p) - (1-p+np)(1-p)^n]/[1 - (1-p)^n]^2}. In the terminology of Klugman et al. (2012), the zero-truncated binomial is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = -p/(1-p)} and \eqn{b = (n+1)p/(1-p)}. If an element of \code{x} is not integer, the result of \code{dztbinom} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dztbinom} gives the probability mass function, \code{pztbinom} gives the distribution function, \code{qztbinom} gives the quantile function, and \code{rztbinom} generates random deviates. Invalid \code{size} or \code{prob} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rztbinom}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}ztbinom} use \code{\{d,p,q\}binom} for all but the trivial input values and \eqn{p(0)}. 
\code{rztbinom} uses the simple inversion algorithm suggested by Peter Dalgaard on the r-help mailing list on 1 May 2005 % (\url{https://stat.ethz.ch/pipermail/r-help/2005-May/070680.html}). } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dbinom}} for the binomial distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ dztbinom(1:5, size = 5, prob = 0.4) dbinom(1:5, 5, 0.4)/pbinom(0, 5, 0.4, lower = FALSE) # same pztbinom(1, 2, prob = 0) # point mass at 1 qztbinom(pztbinom(1:10, 10, 0.6), 10, 0.6) n <- 8; p <- 0.3 x <- 0:n title <- paste("ZT Binomial(", n, ", ", p, ") and Binomial(", n, ", ", p,") PDF", sep = "") plot(x, dztbinom(x, n, p), type = "h", lwd = 2, ylab = "p(x)", main = title) points(x, dbinom(x, n, p), pch = 19, col = "red") legend("topright", c("ZT binomial probabilities", "Binomial probabilities"), col = c("black", "red"), lty = c(1, 0), lwd = 2, pch = c(NA, 19)) } \keyword{distribution} actuar/man/PoissonInverseGaussian.Rd0000644000176200001440000001317415147745722017300 0ustar liggesusers\name{PoissonInverseGaussian} \alias{PoissonInverseGaussian} \alias{PIG} \alias{dpoisinvgauss} \alias{ppoisinvgauss} \alias{qpoisinvgauss} \alias{rpoisinvgauss} \alias{dpig} \alias{ppig} \alias{qpig} \alias{rpig} \title{The Poisson-Inverse Gaussian Distribution} \description{ Density function, distribution function, quantile function and random generation for the Poisson-inverse Gaussian discrete distribution with parameters \code{mean} and \code{shape}. 
} \usage{ dpoisinvgauss(x, mean, shape = 1, dispersion = 1/shape, log = FALSE) ppoisinvgauss(q, mean, shape = 1, dispersion = 1/shape, lower.tail = TRUE, log.p = FALSE) qpoisinvgauss(p, mean, shape = 1, dispersion = 1/shape, lower.tail = TRUE, log.p = FALSE) rpoisinvgauss(n, mean, shape = 1, dispersion = 1/shape) } \arguments{ \item{x}{vector of (positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{mean, shape}{parameters. Must be strictly positive. Infinite values are supported.} \item{dispersion}{an alternative way to specify the shape.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The Poisson-inverse Gaussian distribution is the result of the continuous mixture between a Poisson distribution and an inverse Gaussian, that is, the distribution with probability mass function \deqn{% p(x) = \int_0^\infty \frac{\lambda^x e^{-\lambda}}{x!}\, g(\lambda; \mu, \phi)\, d\lambda,}{% p(x) = int_0^Inf (y^x exp(-y))/x! g(y; \mu, \phi) dy,} where \eqn{g(\lambda; \mu, \phi)}{g(y; \mu, \phi)} is the density function of the inverse Gaussian distribution with parameters \code{mean} \eqn{= \mu} and \code{dispersion} \eqn{= \phi} (see \code{\link{dinvgauss}}). The resulting probability mass function is \deqn{% p(x) = \sqrt{\frac{2}{\pi \phi}} \frac{e^{(\phi\mu)^{-1}}}{x!} \left( \sqrt{2\phi\left(1 + \frac{1}{2\phi\mu^2}\right)} \right)^{-(x - \frac{1}{2})} K_{x - \frac{1}{2}} \left( \sqrt{\frac{2}{\phi}\left(1 + \frac{1}{2\phi\mu^2}\right)} \right),}{% p(x) = sqrt(2/(\pi \phi)) exp(1/(\phi \mu))/x! 
* [\sqrt(2 \phi (1 + 1/(2 \phi \mu^2)))]^(-(x-1/2)) * K(\sqrt((2/\phi) (1 + 1/(2 \phi \mu^2))); x-1/2),} for \eqn{x = 0, 1, \dots}, \eqn{\mu > 0}, \eqn{\phi > 0} and where \eqn{K_\nu(x)}{K(x; \nu)} is the modified Bessel function of the third kind implemented by \R's \code{\link{besselK}()} and defined in its help. The limiting case \eqn{\mu = \infty}{\mu = Inf} has well defined probability mass and distribution functions, but has no finite strictly positive, integer moments. The pmf in this case reduces to \deqn{% p(x) = \sqrt{\frac{2}{\pi \phi}} \frac{1}{x!} (\sqrt{2\phi})^{-(x - \frac{1}{2})} K_{x - \frac{1}{2}}(\sqrt{2/\phi}).}{% p(x) = sqrt(2/(\pi \phi)) 1/x! [\sqrt(2 \phi)]^(-(x-1/2)) * K(\sqrt(2/\phi); x-1/2).} The limiting case \eqn{\phi = 0} is a degenerate distribution in \eqn{x = 0}. If an element of \code{x} is not integer, the result of \code{dpoisinvgauss} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{F(x) \ge p}, where \eqn{F} is the distribution function. } \value{ \code{dpoisinvgauss} gives the probability mass function, \code{ppoisinvgauss} gives the distribution function, \code{qpoisinvgauss} gives the quantile function, and \code{rpoisinvgauss} generates random deviates. Invalid arguments will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rpoisinvgauss}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ \code{[dpqr]pig} are aliases for \code{[dpqr]poisinvgauss}. \code{qpoisinvgauss} is based on \code{qbinom} et al.; it uses the Cornish--Fisher Expansion to include a skewness correction to a normal approximation, followed by a search. } \references{ Holla, M. S. (1966), \dQuote{On a Poisson-Inverse Gaussian Distribution}, \emph{Metrika}, vol. 15, p. 377-384. Johnson, N. L., Kemp, A. W. and Kotz, S. (2005), \emph{Univariate Discrete Distributions, Third Edition}, Wiley. Klugman, S. 
A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. Shaban, S. A., (1981) \dQuote{Computation of the poisson-inverse gaussian distribution}, \emph{Communications in Statistics - Theory and Methods}, vol. 10, no. 14, p. 1389-1399. } \seealso{ \code{\link{dpois}} for the Poisson distribution, \code{\link{dinvgauss}} for the inverse Gaussian distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ ## Tables I and II of Shaban (1981) x <- 0:2 sapply(c(0.4, 0.8, 1), dpoisinvgauss, x = x, mean = 0.1) sapply(c(40, 80, 100, 130), dpoisinvgauss, x = x, mean = 1) qpoisinvgauss(ppoisinvgauss(0:10, 1, dis = 2.5), 1, dis = 2.5) x <- rpoisinvgauss(1000, 1, dis = 2.5) y <- sort(unique(x)) plot(y, table(x)/length(x), type = "h", lwd = 2, pch = 19, col = "black", xlab = "x", ylab = "p(x)", main = "Empirical vs theoretical probabilities") points(y, dpoisinvgauss(y, 1, dis = 2.5), pch = 19, col = "red") legend("topright", c("empirical", "theoretical"), lty = c(1, NA), pch = c(NA, 19), col = c("black", "red")) } \keyword{distribution} actuar/man/Paralogistic.Rd0000644000176200001440000000711615147745722015237 0ustar liggesusers\name{Paralogistic} \alias{Paralogistic} \alias{dparalogis} \alias{pparalogis} \alias{qparalogis} \alias{rparalogis} \alias{mparalogis} \alias{levparalogis} \title{The Paralogistic Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Paralogistic distribution with parameters \code{shape} and \code{scale}. 
} \usage{ dparalogis(x, shape, rate = 1, scale = 1/rate, log = FALSE) pparalogis(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qparalogis(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rparalogis(n, shape, rate = 1, scale = 1/rate) mparalogis(order, shape, rate = 1, scale = 1/rate) levparalogis(limit, shape, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The paralogistic distribution with parameters \code{shape} \eqn{= \alpha}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\alpha^2 (x/\theta)^\alpha}{% x [1 + (x/\theta)^\alpha]^{\alpha + 1}}}{% f(x) = a^2 (x/s)^a / (x [1 + (x/s)^a]^(a + 1))} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{-\alpha < k < \alpha^2}{-shape < k < shape^2}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\alpha}{k > -shape} and \eqn{\alpha - k/\alpha}{shape - k/shape} not a negative integer. 
} \value{ \code{dparalogis} gives the density, \code{pparalogis} gives the distribution function, \code{qparalogis} gives the quantile function, \code{rparalogis} generates random deviates, \code{mparalogis} gives the \eqn{k}th raw moment, and \code{levparalogis} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levparalogis} computes the limited expected value using \code{\link{betaint}}. See Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dparalogis(2, 3, 4, log = TRUE)) p <- (1:10)/10 pparalogis(qparalogis(p, 2, 3), 2, 3) ## variance mparalogis(2, 2, 3) - mparalogis(1, 2, 3)^2 ## case with shape - order/shape > 0 levparalogis(10, 2, 3, order = 2) ## case with shape - order/shape < 0 levparalogis(10, 1.25, 3, order = 2) } \keyword{distribution} actuar/man/quantile.aggregateDist.Rd0000644000176200001440000000313615147745722017207 0ustar liggesusers\name{quantile.aggregateDist} \alias{quantile.aggregateDist} \alias{VaR.aggregateDist} \title{Quantiles of Aggregate Claim Amount Distribution} \description{ Quantile and Value-at-Risk methods for objects of class \code{"aggregateDist"}. 
} \usage{ \method{quantile}{aggregateDist}(x, probs = c(0.25, 0.5, 0.75, 0.9, 0.95, 0.975, 0.99, 0.995), smooth = FALSE, names = TRUE, \dots) \method{VaR}{aggregateDist}(x, conf.level = c(0.9, 0.95, 0.99), smooth = FALSE, names = TRUE, \dots) } \arguments{ \item{x}{an object of class \code{"aggregateDist"}.} \item{probs, conf.level}{numeric vector of probabilities with values in \eqn{[0, 1)}.} \item{smooth}{logical; when \code{TRUE} and \code{x} is a step function, quantiles are linearly interpolated between knots.} \item{names}{logical; if true, the result has a \code{names} attribute. Set to \code{FALSE} for speedup with many \code{probs}.} \item{\dots}{further arguments passed to or from other methods.} } \details{ The quantiles are taken directly from the cumulative distribution function defined in \code{x}. Linear interpolation is available for step functions. } \value{ A numeric vector, named if \code{names} is \code{TRUE}. } \seealso{ \code{\link{aggregateDist}} } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Louis-Philippe Pouliot } \examples{ model.freq <- expression(data = rpois(3)) model.sev <- expression(data = rlnorm(10, 1.5)) Fs <- aggregateDist("simulation", model.freq, model.sev, nb.simul = 1000) quantile(Fs, probs = c(0.25, 0.5, 0.75)) VaR(Fs) } \keyword{univar} actuar/man/Pareto2.Rd0000644000176200001440000001107515147745722014131 0ustar liggesusers\name{Pareto2} \alias{Pareto2} \alias{dpareto2} \alias{ppareto2} \alias{qpareto2} \alias{rpareto2} \alias{mpareto2} \alias{levpareto2} \title{The Pareto II Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Pareto II distribution with parameters \code{min}, \code{shape} and \code{scale}. 
} \usage{ dpareto2(x, min, shape, rate = 1, scale = 1/rate, log = FALSE) ppareto2(q, min, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qpareto2(p, min, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rpareto2(n, min, shape, rate = 1, scale = 1/rate) mpareto2(order, min, shape, rate = 1, scale = 1/rate) levpareto2(limit, min, shape, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{min}{lower bound of the support of the distribution.} \item{shape, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Pareto II (or \dQuote{type II}) distribution with parameters \code{min} \eqn{= \mu}{= m}, \code{shape} \eqn{= \alpha}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\alpha}{% \theta [1 + (x - \mu)/\theta]^{\alpha + 1}}}{% f(x) = a/(s [1 + (x - m)/s]^(a + 1))} for \eqn{x > \mu}{x > m}, \eqn{-\infty < \mu < \infty}{-Inf < m < Inf}, \eqn{\alpha > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. The Pareto II is the distribution of the random variable \deqn{\mu + \theta \left(\frac{X}{1 - X}\right),}{% m + s X/(1 - X),} where \eqn{X} has a beta distribution with parameters \eqn{1} and \eqn{\alpha}{a}. It derives from the \link[=dfpareto]{Feller-Pareto} distribution with \eqn{\tau = \gamma = 1}{shape2 = shape3 = 1}. Setting \eqn{\mu = 0}{min = 0} yields the familiar \link[=dpareto]{Pareto} distribution. 
The \link[=dpareto1]{Pareto I} (or Single parameter Pareto) distribution is a special case of the Pareto II with \code{min == scale}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} for nonnegative integer values of \eqn{k < \alpha}{k < shape}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} for nonnegative integer values of \eqn{k} and \eqn{\alpha - j}{shape - j}, \eqn{j = 1, \dots, k} not a negative integer. } \value{ \code{dpareto2} gives the density, \code{ppareto2} gives the distribution function, \code{qpareto2} gives the quantile function, \code{rpareto2} generates random deviates, \code{mpareto2} gives the \eqn{k}th raw moment, and \code{levpareto2} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levpareto2} computes the limited expected value using \code{\link{betaint}}. For Pareto distributions, we use the classification of Arnold (2015) with the parametrization of Klugman et al. (2012). The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Arnold, B.C. (2015), \emph{Pareto Distributions}, Second Edition, CRC Press. Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dpareto}} for the Pareto distribution without a location parameter. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ exp(dpareto2(1, min = 10, 3, 4, log = TRUE)) p <- (1:10)/10 ppareto2(qpareto2(p, min = 10, 2, 3), min = 10, 2, 3) ## variance mpareto2(2, min = 10, 4, 1) - mpareto2(1, min = 10, 4, 1)^2 ## case with shape - order > 0 levpareto2(10, min = 10, 3, scale = 1, order = 2) ## case with shape - order < 0 levpareto2(10, min = 10, 1.5, scale = 1, order = 2) } \keyword{distribution} actuar/man/TransformedGamma.Rd0000644000176200001440000001053515147745722016044 0ustar liggesusers\name{TransformedGamma} \alias{TransformedGamma} \alias{dtrgamma} \alias{ptrgamma} \alias{qtrgamma} \alias{rtrgamma} \alias{mtrgamma} \alias{levtrgamma} \title{The Transformed Gamma Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Transformed Gamma distribution with parameters \code{shape1}, \code{shape2} and \code{scale}. } \usage{ dtrgamma(x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) ptrgamma(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qtrgamma(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rtrgamma(n, shape1, shape2, rate = 1, scale = 1/rate) mtrgamma(order, shape1, shape2, rate = 1, scale = 1/rate) levtrgamma(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The transformed gamma distribution with parameters \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \tau}{= b} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\tau u^\alpha e^{-u}}{x \Gamma(\alpha)}, % \quad u = (x/\theta)^\tau}{% f(x) = b u^a exp(-u) / (x Gamma(a)), u = (x/s)^b} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0}, \eqn{\tau > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) The transformed gamma is the distribution of the random variable \eqn{\theta X^{1/\tau},}{s X^(1/b),} where \eqn{X} has a gamma distribution with shape parameter \eqn{\alpha}{a} and scale parameter \eqn{1} or, equivalently, of the random variable \eqn{Y^{1/\tau}}{Y^(1/b)} with \eqn{Y} a gamma distribution with shape parameter \eqn{\alpha}{a} and scale parameter \eqn{\theta^\tau}{s^b}. The transformed gamma probability distribution defines a family of distributions with the following special cases: \itemize{ \item A \link[=dgamma]{Gamma} distribution when \code{shape2 == 1}; \item A \link[=dweibull]{Weibull} distribution when \code{shape1 == 1}; \item An \link[=dexp]{Exponential} distribution when \code{shape2 == shape1 == 1}. } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\alpha\tau}{k > -shape1 * shape2}. 
} \value{ \code{dtrgamma} gives the density, \code{ptrgamma} gives the distribution function, \code{qtrgamma} gives the quantile function, \code{rtrgamma} generates random deviates, \code{mtrgamma} gives the \eqn{k}th raw moment, and \code{levtrgamma} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ Distribution also known as the Generalized Gamma. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dtrgamma(2, 3, 4, 5, log = TRUE)) p <- (1:10)/10 ptrgamma(qtrgamma(p, 2, 3, 4), 2, 3, 4) mtrgamma(2, 3, 4, 5) - mtrgamma(1, 3, 4, 5) ^ 2 levtrgamma(10, 3, 4, 5, order = 2) } \keyword{distribution} actuar/man/ZeroModifiedBinomial.Rd0000644000176200001440000001074715147745722016655 0ustar liggesusers\name{ZeroModifiedBinomial} \alias{ZeroModifiedBinomial} \alias{ZMBinomial} \alias{dzmbinom} \alias{pzmbinom} \alias{qzmbinom} \alias{rzmbinom} \title{The Zero-Modified Binomial Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Modified Binomial distribution with parameters \code{size} and \code{prob}, and probability at zero \code{p0}. 
} \usage{ dzmbinom(x, size, prob, p0, log = FALSE) pzmbinom(q, size, prob, p0, lower.tail = TRUE, log.p = FALSE) qzmbinom(p, size, prob, p0, lower.tail = TRUE, log.p = FALSE) rzmbinom(n, size, prob, p0) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{size}{number of trials (strictly positive integer).} \item{prob}{probability of success on each trial. \code{0 <= prob <= 1}.} \item{p0}{probability mass at zero. \code{0 <= p0 <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-modified binomial distribution with \code{size} \eqn{= n}, \code{prob} \eqn{= p} and \code{p0} \eqn{= p_0}{= p0} is a discrete mixture between a degenerate distribution at zero and a (standard) binomial. The probability mass function is \eqn{p(0) = p_0}{p(0) = p0} and \deqn{% p(x) = \frac{(1-p_0)}{(1 - (1-p)^n)} f(x)}{% p(x) = (1-p0)/[1 - (1-p)^n] f(x)} for \eqn{x = 1, \ldots, n}, \eqn{0 < p \le 1} and \eqn{0 \le p_0 \le 1}{0 \le p0 \le 1}, where \eqn{f(x)} is the probability mass function of the binomial. The cumulative distribution function is \deqn{P(x) = p_0 + (1 - p_0) \left(\frac{F(x) - F(0)}{1 - F(0)}\right)}{% P(x) = p0 + (1 - p0) [F(x) - F(0)]/[1 - F(0)].} The mean is \eqn{(1-p_0) \mu}{(1-p0)m} and the variance is \eqn{(1-p_0) \sigma^2 + p_0(1-p_0) \mu^2}{(1-p0)v + p0(1-p0)m^2}, where \eqn{\mu}{m} and \eqn{\sigma^2}{v} are the mean and variance of the zero-truncated binomial. In the terminology of Klugman et al. (2012), the zero-modified binomial is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = -p/(1-p)} and \eqn{b = (n+1)p/(1-p)}. 
The special case \code{p0 == 0} is the zero-truncated binomial. If an element of \code{x} is not integer, the result of \code{dzmbinom} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dzmbinom} gives the probability mass function, \code{pzmbinom} gives the distribution function, \code{qzmbinom} gives the quantile function, and \code{rzmbinom} generates random deviates. Invalid \code{size}, \code{prob} or \code{p0} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rzmbinom}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}zmbinom} use \code{\{d,p,q\}binom} for all but the trivial input values and \eqn{p(0)}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dbinom}} for the binomial distribution. \code{\link{dztbinom}} for the zero-truncated binomial distribution. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ dzmbinom(1:5, size = 5, prob = 0.4, p0 = 0.2) (1-0.2) * dbinom(1:5, 5, 0.4)/pbinom(0, 5, 0.4, lower = FALSE) # same ## simple relation between survival functions pzmbinom(0:5, 5, 0.4, p0 = 0.2, lower = FALSE) (1-0.2) * pbinom(0:5, 5, 0.4, lower = FALSE) / pbinom(0, 5, 0.4, lower = FALSE) # same qzmbinom(pzmbinom(1:10, 10, 0.6, p0 = 0.1), 10, 0.6, p0 = 0.1) n <- 8; p <- 0.3; p0 <- 0.025 x <- 0:n title <- paste("ZM Binomial(", n, ", ", p, ", p0 = ", p0, ") and Binomial(", n, ", ", p,") PDF", sep = "") plot(x, dzmbinom(x, n, p, p0), type = "h", lwd = 2, ylab = "p(x)", main = title) points(x, dbinom(x, n, p), pch = 19, col = "red") legend("topright", c("ZT binomial probabilities", "Binomial probabilities"), col = c("black", "red"), lty = c(1, 0), lwd = 2, pch = c(NA, 19)) } \keyword{distribution} actuar/man/adjCoef.Rd0000644000176200001440000001411315147745722014144 0ustar liggesusers\name{adjCoef} \alias{adjCoef} \alias{plot.adjCoef} \title{Adjustment Coefficient} \description{ Compute the adjustment coefficient in ruin theory, or return a function to compute the adjustment coefficient for various reinsurance retentions. } \usage{ adjCoef(mgf.claim, mgf.wait = mgfexp, premium.rate, upper.bound, h, reinsurance = c("none", "proportional", "excess-of-loss"), from, to, n = 101) \method{plot}{adjCoef}(x, xlab = "x", ylab = "R(x)", main = "Adjustment Coefficient", sub = comment(x), type = "l", add = FALSE, \dots) } \arguments{ \item{mgf.claim}{an expression written as a function of \code{x} or of \code{x} and \code{y}, or alternatively the name of a function, giving the moment generating function (mgf) of the claim severity distribution.} \item{mgf.wait}{an expression written as a function of \code{x}, or alternatively the name of a function, giving the mgf of the claims interarrival time distribution. 
Defaults to an exponential distribution with mean 1.} \item{premium.rate}{if \code{reinsurance = "none"}, a numeric value of the premium rate; otherwise, an expression written as a function of \code{y}, or alternatively the name of a function, giving the premium rate function.} \item{upper.bound}{numeric; an upper bound for the coefficient, usually the upper bound of the support of the claim severity mgf.} \item{h}{an expression written as a function of \code{x} or of \code{x} and \code{y}, or alternatively the name of a function, giving function \eqn{h} in the Lundberg equation (see below); ignored if \code{mgf.claim} is provided.} \item{reinsurance}{the type of reinsurance for the portfolio; can be abbreviated.} \item{from, to}{the range over which the adjustment coefficient will be calculated.} \item{n}{integer; the number of values at which to evaluate the adjustment coefficient.} \item{x}{an object of class \code{"adjCoef"}.} \item{xlab, ylab}{label of the x and y axes, respectively.} \item{main}{main title.} \item{sub}{subtitle, defaulting to the type of reinsurance.} \item{type}{1-character string giving the type of plot desired; see \code{\link[graphics]{plot}} for details.} \item{add}{logical; if \code{TRUE} add to already existing plot.} \item{\dots}{further graphical parameters accepted by \code{\link[graphics]{plot}} or \code{\link[graphics]{lines}}.} } \details{ In the typical case \code{reinsurance = "none"}, the adjustment coefficient is the smallest (strictly) positive root of the Lundberg equation% \deqn{h(x) = E[e^{x B - x c W}] = 1}{h(x) = E[exp(x B - x c W)] = 1}% on \eqn{[0, m)}, where \eqn{m =} \code{upper.bound}, \eqn{B} is the claim severity random variable, \eqn{W} is the claim interarrival (or wait) time random variable and \eqn{c =} \code{premium.rate}. The premium rate must satisfy the positive safety loading constraint \eqn{E[B - c W] < 0}. 
With \code{reinsurance = "proportional"}, the equation becomes \deqn{h(x, y) = E[e^{x y B - x c(y) W}] = 1,}{% h(x, y) = E[exp(x y B - x c(y) W)] = 1,} where \eqn{y} is the retention rate and \eqn{c(y)} is the premium rate function. With \code{reinsurance = "excess-of-loss"}, the equation becomes \deqn{h(x, y) = E[e^{x \min(B, y) - x c(y) W}] = 1,}{% h(x, y) = E[exp(x min(B, y) - x c(y) W)] = 1,} where \eqn{y} is the retention limit and \eqn{c(y)} is the premium rate function. One can use argument \code{h} as an alternative way to provide function \eqn{h(x)} or \eqn{h(x, y)}. This is necessary in cases where random variables \eqn{B} and \eqn{W} are not independent. The root of \eqn{h(x) = 1} is found by minimizing \eqn{(h(x) - 1)^2}. } \value{ If \code{reinsurance = "none"}, a numeric vector of length one. Otherwise, a function of class \code{"adjCoef"} inheriting from the \code{"function"} class. } \references{ Bowers, N. J. J., Gerber, H. U., Hickman, J., Jones, D. and Nesbitt, C. (1986), \emph{Actuarial Mathematics}, Society of Actuaries. Centeno, M. d. L. (2002), Measuring the effects of reinsurance by the adjustment coefficient in the Sparre-Anderson model, \emph{Insurance: Mathematics and Economics} \bold{30}, 37--49. Gerber, H. U. (1979), \emph{An Introduction to Mathematical Risk Theory}, Huebner Foundation. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2008), \emph{Loss Models, From Data to Decisions, Third Edition}, Wiley. } \author{ Christophe Dutang, Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ ## Basic example: no reinsurance, exponential claim severity and wait ## times, premium rate computed with expected value principle and ## safety loading of 20\%. adjCoef(mgfexp, premium = 1.2, upper = 1) ## Same thing, giving function h. h <- function(x) 1/((1 - x) * (1 + 1.2 * x)) adjCoef(h = h, upper = 1) ## Example 11.4 of Klugman et al. 
(2008) mgfx <- function(x) 0.6 * exp(x) + 0.4 * exp(2 * x) adjCoef(mgfx(x), mgfexp(x, 4), prem = 7, upper = 0.3182) ## Proportional reinsurance, same assumptions as above, reinsurer's ## safety loading of 30\%. mgfx <- function(x, y) mgfexp(x * y) p <- function(x) 1.3 * x - 0.1 h <- function(x, a) 1/((1 - a * x) * (1 + x * p(a))) R1 <- adjCoef(mgfx, premium = p, upper = 1, reins = "proportional", from = 0, to = 1, n = 11) R2 <- adjCoef(h = h, upper = 1, reins = "p", from = 0, to = 1, n = 101) R1(seq(0, 1, length = 10)) # evaluation for various retention rates R2(seq(0, 1, length = 10)) # same plot(R1) # graphical representation plot(R2, col = "green", add = TRUE) # smoother function ## Excess-of-loss reinsurance p <- function(x) 1.3 * levgamma(x, 2, 2) - 0.1 mgfx <- function(x, l) mgfgamma(x, 2, 2) * pgamma(l, 2, 2 - x) + exp(x * l) * pgamma(l, 2, 2, lower = FALSE) h <- function(x, l) mgfx(x, l) * mgfexp(-x * p(l)) R1 <- adjCoef(mgfx, upper = 1, premium = p, reins = "excess-of-loss", from = 0, to = 10, n = 11) R2 <- adjCoef(h = h, upper = 1, reins = "e", from = 0, to = 10, n = 101) plot(R1) plot(R2, col = "green", add = TRUE) } \keyword{optimize} \keyword{univar} actuar/man/ZeroModifiedPoisson.Rd0000644000176200001440000000730715147745722016553 0ustar liggesusers\name{ZeroModifiedPoisson} \alias{ZeroModifiedPoisson} \alias{ZMpoisson} \alias{dzmpois} \alias{pzmpois} \alias{qzmpois} \alias{rzmpois} \title{The Zero-Modified Poisson Distribution} \description{ Density function, distribution function, quantile function, random generation for the Zero-Modified Poisson distribution with parameter \code{lambda} and arbitrary probability at zero \code{p0}. 
} \usage{ dzmpois(x, lambda, p0, log = FALSE) pzmpois(q, lambda, p0, lower.tail = TRUE, log.p = FALSE) qzmpois(p, lambda, p0, lower.tail = TRUE, log.p = FALSE) rzmpois(n, lambda, p0) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of values to return.} \item{lambda}{vector of (non negative) means.} \item{p0}{probability mass at zero. \code{0 <= p0 <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-modified Poisson distribution is a discrete mixture between a degenerate distribution at zero and a (standard) Poisson. The probability mass function is \eqn{p(0) = p_0}{p(0) = p0} and \deqn{% p(x) = \frac{(1-p_0)}{(1-e^{-\lambda})} f(x)}{% p(x) = (1-p0)/(1-exp(-lambda)) f(x)} for \eqn{x = 1, 2, ...}, \eqn{\lambda > 0} and \eqn{0 \le p_0 \le 1}{0 \le p0 \le 1}, where \eqn{f(x)} is the probability mass function of the Poisson. The cumulative distribution function is \deqn{P(x) = p_0 + (1 - p_0) \left(\frac{F(x) - F(0)}{1 - F(0)}\right).}{% P(x) = p0 + (1 - p0) [F(x) - F(0)]/[1 - F(0)].} The mean is \eqn{(1-p_0) \mu}{(1-p0)m} and the variance is \eqn{(1-p_0) \sigma^2 + p_0(1-p_0) \mu^2}{(1-p0)v + p0(1-p0)m^2}, where \eqn{\mu}{m} and \eqn{\sigma^2}{v} are the mean and variance of the zero-truncated Poisson. In the terminology of Klugman et al. (2012), the zero-modified Poisson is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = 0} and \eqn{b = \lambda}. The special case \code{p0 == 0} is the zero-truncated Poisson. If an element of \code{x} is not integer, the result of \code{dzmpois} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. 
} \value{ \code{dzmpois} gives the (log) probability mass function, \code{pzmpois} gives the (log) distribution function, \code{qzmpois} gives the quantile function, and \code{rzmpois} generates random deviates. Invalid \code{lambda} or \code{p0} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rzmpois}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}zmpois} use \code{\{d,p,q\}pois} for all but the trivial input values and \eqn{p(0)}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dpois}} for the standard Poisson distribution. \code{\link{dztpois}} for the zero-truncated Poisson distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ dzmpois(0:5, lambda = 1, p0 = 0.2) (1-0.2) * dpois(0:5, lambda = 1)/ppois(0, 1, lower = FALSE) # same ## simple relation between survival functions pzmpois(0:5, 1, p0 = 0.2, lower = FALSE) (1-0.2) * ppois(0:5, 1, lower = FALSE) / ppois(0, 1, lower = FALSE) # same qzmpois(pzmpois(0:10, 1, p0 = 0.7), 1, p0 = 0.7) } \keyword{distribution} actuar/man/severity.Rd0000644000176200001440000000244415147745722014467 0ustar liggesusers\name{severity} \alias{severity} \alias{severity.default} \title{Manipulation of Individual Claim Amounts} \description{ \code{severity} is a generic function created to manipulate individual claim amounts. The function invokes particular \emph{methods} which depend on the \code{\link[base]{class}} of the first argument. } \usage{ severity(x, ...) 
\method{severity}{default}(x, bycol = FALSE, drop = TRUE, \dots) } \arguments{ \item{x}{an \R object.} \item{bycol}{logical; whether to \dQuote{unroll} horizontally (\code{FALSE}) or vertically (\code{TRUE})} \item{\dots}{further arguments to be passed to or from other methods.} \item{drop}{logical; if \code{TRUE}, the result is coerced to the lowest possible dimension.} } \details{ Currently, the default method is equivalent to \code{\link{unroll}}. This is liable to change since the link between the name and the use of the function is rather weak. } \value{ A vector or matrix. } \seealso{ \code{\link{severity.portfolio}} for the original motivation of these functions. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Louis-Philippe Pouliot } \examples{ x <- list(c(1:3), c(1:8), c(1:4), c(1:3)) (mat <- matrix(x, 2, 2)) severity(mat) severity(mat, bycol = TRUE) } \keyword{datagen} \keyword{manip} actuar/man/rmixture.Rd0000644000176200001440000000730115147745722014471 0ustar liggesusers\name{rmixture} \alias{rmixture} \title{Simulation from Discrete Mixtures} \description{ Generate random variates from a discrete mixture of distributions. } \usage{ rmixture(n, probs, models, shuffle = TRUE) } \arguments{ \item{n}{number of random variates to generate. If \code{length(n) > 1}, the length is taken to be the number required.} \item{probs}{numeric non-negative vector specifying the probability for each model; is internally normalized to sum 1. Infinite and missing values are not allowed. Values are recycled as necessary to match the length of \code{models}.} \item{models}{vector of expressions specifying the simulation models with the number of variates omitted; see Details. 
Models are recycled as necessary to match the length of \code{probs}.} \item{shuffle}{logical; should the random variates from the distributions be shuffled?} } \details{ \code{rmixture} generates variates from a discrete mixture, that is the random variable with a probability density function of the form \deqn{f(x) = p_1 f_1(x) + ... + p_n f_n(x),} where \eqn{f_1, \dots, f_n} are densities and \eqn{\sum_{i = 1}^n p_i = 1}{p_1 + \dots + p_n = 1}. The values in \code{probs} will be internally normalized to be used as probabilities \eqn{p_1 + \dots + p_n}. The specification of simulation models uses the syntax of \code{\link{rcomphierarc}}. Models \eqn{f_1, \dots, f_n} are expressed in a semi-symbolic fashion using an object of mode \code{\link[base]{expression}} where each element is a complete call to a random number generation function, with the number of variates omitted. The argument of the random number generation functions for the number of variates to simulate \strong{must} be named \code{n}. If \code{shuffle} is \code{FALSE}, the output vector contains all the random variates from the first model, then all the random variates from the second model, and so on. If the order of the variates is irrelevant, this cuts the time to generate the variates roughly in half. } \note{ Building the expressions in \code{models} from the arguments of another function is delicate. The expressions must be such that evaluation is possible in the frame of \code{rmixture} or its parent. See the examples. } \value{ A vector of random variates from the mixture with density \eqn{f(x)}. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \seealso{ \code{\link{rcompound}} to simulate from compound models. \code{\link{rcomphierarc}} to simulate from compound hierarchical models. } \examples{ ## Mixture of two exponentials (with means 1/3 and 1/7) with equal ## probabilities. 
rmixture(10, 0.5, expression(rexp(3), rexp(7))) rmixture(10, 42, expression(rexp(3), rexp(7))) # same ## Mixture of two lognormals with different probabilities. rmixture(10, probs = c(0.55, 0.45), models = expression(rlnorm(3.6, 0.6), rlnorm(4.6, 0.3))) ## Building the model expressions in the following example ## works as 'rate' is defined in the parent frame of ## 'rmixture'. probs <- c(2, 5) g <- function(n, p, rate) rmixture(n, p, expression(rexp(rate[1]), rexp(rate[2]))) g(10, probs, c(3, 7)) ## The following example does not work: 'rate' does not exist ## in the evaluation frame of 'rmixture'. f <- function(n, p, model) rmixture(n, p, model) h <- function(n, p, rate) f(n, p, expression(rexp(rate[1]), rexp(rate[2]))) \dontrun{h(10, probs, c(3, 7))} ## Fix: substitute the values in the model expressions. h <- function(n, p, rate) { models <- eval(substitute(expression(rexp(a[1]), rexp(a[2])), list(a = rate))) f(n, p, models) } h(10, probs, c(3, 7)) } \keyword{datagen} actuar/man/betaint.Rd0000644000176200001440000000633615147745722014247 0ustar liggesusers\name{betaint} \alias{betaint} \title{The \dQuote{Beta Integral}} \description{ The \dQuote{beta integral} is just a multiple of the non regularized incomplete beta function. This function provides an R interface to the C level routine. It is not exported by the package. } \usage{ betaint(x, a, b) } \arguments{ \item{x}{vector of quantiles.} \item{a, b}{parameters. See Details for admissible values.} } \details{ Function \code{betaint} computes the \dQuote{beta integral} \deqn{ B(a, b; x) = \Gamma(a + b) \int_0^x t^{a-1} (1-t)^{b-1} dt}{% B(a, b; x) = Gamma(a + b) int_0^x t^(a-1) (1-t)^(b-1) dt} for \eqn{a > 0}, \eqn{b \neq -1, -2, \ldots}{b != -1, -2, \ldots} and \eqn{0 < x < 1}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) 
When \eqn{b > 0}, \deqn{ B(a, b; x) = \Gamma(a) \Gamma(b) I_x(a, b),} where \eqn{I_x(a, b)} is \code{pbeta(x, a, b)}. When \eqn{b < 0}, \eqn{b \neq -1, -2, \ldots}{b != -1, -2, \ldots}, and \eqn{a > 1 + [-b]}{a > 1 + floor(-b)}, \deqn{% \begin{array}{rcl} B(a, b; x) &=& \displaystyle -\Gamma(a + b) \left[ \frac{x^{a-1} (1-x)^b}{b} + \frac{(a-1) x^{a-2} (1-x)^{b+1}}{b (b+1)} \right. \\ & & \displaystyle\left. + \cdots + \frac{(a-1) \cdots (a-r) x^{a-r-1} (1-x)^{b+r}}{b (b+1) \cdots (b+r)} \right] \\ & & \displaystyle + \frac{(a-1) \cdots (a-r-1)}{b (b+1) \cdots (b+r)} \Gamma(a-r-1) \\ & & \times \Gamma(b+r+1) I_x(a-r-1, b+r+1), \end{array}}{% B(a, b; x) = -Gamma(a+b) \{(x^(a-1) (1-x)^b)/b + [(a-1) x^(a-2) (1-x)^(b+1)]/[b(b+1)] + \dots + [(a-1)\dots(a-r) x^(a-r-1) (1-x)^(b+r)]/[b(b+1)\dots(b+r)]\} + [(a-1)\dots(a-r-1)]/[b(b+1)\dots(b+r)] Gamma(a-r-1) * Gamma(b+r+1) I_x(a-r-1, b+r+1),} where \eqn{r = [-b]}{r = floor(-b)}. This function is used (at the C level) to compute the limited expected value for distributions of the transformed beta family; see, for example, \code{\link{levtrbeta}}. } \value{ The value of the integral. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ The need for this function in the package is well explained in the introduction of Appendix A of Klugman et al. (2012). See also chapter 6 and 15 of Abramowitz and Stegun (1972) for definitions and relations to the hypergeometric series. } \references{ Abramowitz, M. and Stegun, I. A. (1972), \emph{Handbook of Mathematical Functions}, Dover. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ x <- 0.3 a <- 7 ## case with b > 0 b <- 2 actuar:::betaint(x, a, b) gamma(a) * gamma(b) * pbeta(x, a, b) # same ## case with b < 0 b <- -2.2 r <- floor(-b) # r = 2 actuar:::betaint(x, a, b) ## "manual" calculation s <- (x^(a-1) * (1-x)^b)/b + ((a-1) * x^(a-2) * (1-x)^(b+1))/(b * (b+1)) + ((a-1) * (a-2) * x^(a-3) * (1-x)^(b+2))/(b * (b+1) * (b+2)) -gamma(a+b) * s + (a-1)*(a-2)*(a-3) * gamma(a-r-1)/(b*(b+1)*(b+2)) * gamma(b+r+1)*pbeta(x, a-r-1, b+r+1) } \keyword{math} \keyword{distribution} actuar/man/Pareto.Rd0000644000176200001440000000732315147745722014050 0ustar liggesusers\name{Pareto} \alias{Pareto} \alias{dpareto} \alias{ppareto} \alias{qpareto} \alias{rpareto} \alias{mpareto} \alias{levpareto} \title{The Pareto Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Pareto distribution with parameters \code{shape} and \code{scale}. } \usage{ dpareto(x, shape, scale, log = FALSE) ppareto(q, shape, scale, lower.tail = TRUE, log.p = FALSE) qpareto(p, shape, scale, lower.tail = TRUE, log.p = FALSE) rpareto(n, shape, scale) mpareto(order, shape, scale) levpareto(limit, shape, scale, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. 
Must be strictly positive.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Pareto distribution with parameters \code{shape} \eqn{= \alpha}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\alpha \theta^\alpha}{(x + \theta)^{\alpha + 1}}}{% f(x) = a s^a / (x + s)^(a + 1)} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. There are many different definitions of the Pareto distribution in the literature; see Arnold (2015) or Kleiber and Kotz (2003). In the nomenclature of \pkg{actuar}, the \dQuote{Pareto distribution} does not have a location parameter. The version with a location parameter is the \link[=dpareto2]{Pareto II}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}, \eqn{-1 < k < \alpha}{-1 < k < shape}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -1} and \eqn{\alpha - k}{shape - k} not a negative integer. } \value{ \code{dpareto} gives the density, \code{ppareto} gives the distribution function, \code{qpareto} gives the quantile function, \code{rpareto} generates random deviates, \code{mpareto} gives the \eqn{k}th raw moment, and \code{levpareto} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levpareto} computes the limited expected value using \code{\link{betaint}}. The version of the Pareto defined for \eqn{x > \theta}{x > s} is named Single Parameter Pareto, or Pareto I, in \pkg{actuar}. } \seealso{ \code{\link{dpareto2}} for an equivalent distribution with location parameter. \code{\link{dpareto1}} for the Single Parameter Pareto distribution. 
\code{"distributions"} package vignette for details on the interrelations between the continuous size distributions in \pkg{actuar} and complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dpareto(2, 3, 4, log = TRUE)) p <- (1:10)/10 ppareto(qpareto(p, 2, 3), 2, 3) ## variance mpareto(2, 4, 1) - mpareto(1, 4, 1)^2 ## case with shape - order > 0 levpareto(10, 3, scale = 1, order = 2) ## case with shape - order < 0 levpareto(10, 1.5, scale = 1, order = 2) } \keyword{distribution} actuar/man/grouped.data.Rd0000644000176200001440000001131615147745722015170 0ustar liggesusers\name{grouped.data} \alias{grouped.data} \title{Grouped data} \description{ Creation of grouped data objects, from either a provided set of group boundaries and group frequencies, or from individual data using automatic or specified breakpoints. } \usage{ grouped.data(\dots, breaks = "Sturges", include.lowest = TRUE, right = TRUE, nclass = NULL, group = FALSE, row.names = NULL, check.rows = FALSE, check.names = TRUE) } \arguments{ \item{\dots}{arguments of the form \code{value} or \code{tag = value}; see Details.} \item{breaks}{same as for \code{\link{hist}}, namely one of: \itemize{ \item{a vector giving the breakpoints between groups;} \item{a function to compute the vector of breakpoints;} \item{a single number giving the number of groups;} \item{a character string naming an algorithm to compute the number of groups (see \code{\link{hist}});} \item{a function to compute the number of groups.} } In the last three cases the number is a suggestion only; the breakpoints will be set to \code{\link{pretty}} values. 
If \code{breaks} is a function, the first element in \code{\dots} is supplied to it as the only argument. } \item{include.lowest}{logical; if \code{TRUE}, a data point equal to the \code{breaks} value will be included in the first (or last, for \code{right = FALSE}) group. Used only for individual data; see Details.} \item{right}{logical; indicating if the intervals should be closed on the right (and open on the left) or vice versa.} \item{nclass}{numeric (integer); equivalent to \code{breaks} for a scalar or character argument.} \item{group}{logical; an alternative way to force grouping of individual data.} \item{row.names, check.rows, check.names}{arguments identical to those of \code{\link{data.frame}}.} } \details{ A grouped data object is a special form of data frame consisting of one column of contiguous group boundaries and one or more columns of frequencies within each group. The function can create a grouped data object from two types of arguments. \enumerate{ \item{Group boundaries and frequencies. This is the default mode of operation if the call has at least two elements in \code{\dots}. The first argument will then be taken as the vector of group boundaries. This vector must be exactly one element longer than the other arguments, which will be taken as vectors of group frequencies. All arguments are coerced to data frames.} \item{Individual data. This mode of operation is active if there is a single argument in \code{\dots}, or if either \code{breaks} or \code{nclass} is specified or \code{group} is \code{TRUE}. Arguments of \code{\dots} are first grouped using \code{\link{hist}}. If needed, breakpoints are set using the first argument.} } Missing (\code{NA}) frequencies are replaced by zeros, with a warning. Extraction and replacement methods exist for \code{grouped.data} objects, but working on non adjacent groups will most likely yield useless results. 
} \value{ An object of \code{class} \code{c("grouped.data", "data.frame")} with an environment containing the vector \code{cj} of group boundaries. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \seealso{ \code{\link{[.grouped.data}} for extraction and replacement methods. \code{\link{data.frame}} for usual data frame creation and manipulation. \code{\link{hist}} for details on the calculation of breakpoints. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Mathieu Pigeon and Louis-Philippe Pouliot } \examples{ ## Most common usage using a predetermined set of group ## boundaries and group frequencies. cj <- c(0, 25, 50, 100, 250, 500, 1000) nj <- c(30, 31, 57, 42, 45, 10) (x <- grouped.data(Group = cj, Frequency = nj)) class(x) x[, 1] # group boundaries x[, 2] # group frequencies ## Multiple frequency columns are supported x <- sample(1:100, 9) y <- sample(1:100, 9) grouped.data(cj = 1:10, nj.1 = x, nj.2 = y) ## Alternative usage with grouping of individual data. grouped.data(x) # automatic breakpoints grouped.data(x, breaks = 7) # forced number of groups grouped.data(x, breaks = c(0,25,75,100)) # specified groups grouped.data(x, y, breaks = c(0,25,75,100)) # multiple data sets \dontrun{## Providing two or more data sets and automatic breakpoints is ## very error-prone since the range of the first data set has to ## include the ranges of all the other data sets. range(x) range(y) grouped.data(x, y, group = TRUE)} } \keyword{classes} \keyword{methods} actuar/man/dental.Rd0000644000176200001440000000055015147745722014060 0ustar liggesusers\name{dental} \docType{data} \alias{dental} \title{Individual Dental Claims Data Set} \description{ Basic dental claims on a policy with a deductible of 50. } \usage{dental} \format{A vector containing 10 observations} \source{Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. 
} \keyword{datasets} actuar/man/cm.Rd0000644000176200001440000004256315147745722013222 0ustar liggesusers\name{cm} \alias{cm} \alias{print.cm} \alias{predict.cm} \alias{summary.cm} \alias{print.summary.cm} \title{Credibility Models} \description{ Fit the following credibility models: \enc{Bühlmann}{Buhlmann}, \enc{Bühlmann}{Buhlmann}-Straub, hierarchical, regression (Hachemeister) or linear Bayes. } \usage{ cm(formula, data, ratios, weights, subset, regformula = NULL, regdata, adj.intercept = FALSE, method = c("Buhlmann-Gisler", "Ohlsson", "iterative"), likelihood, ..., tol = sqrt(.Machine$double.eps), maxit = 100, echo = FALSE) \method{print}{cm}(x, \dots) \method{predict}{cm}(object, levels = NULL, newdata, \dots) \method{summary}{cm}(object, levels = NULL, newdata, \dots) \method{print}{summary.cm}(x, \dots) } \arguments{ \item{formula}{character string \code{"bayes"} or an object of class \code{"\link[stats]{formula}"}: a symbolic description of the model to be fit. The details of model specification are given below.} \item{data}{a matrix or a data frame containing the portfolio structure, the ratios or claim amounts and their associated weights, if any.} \item{ratios}{expression indicating the columns of \code{data} containing the ratios or claim amounts.} \item{weights}{expression indicating the columns of \code{data} containing the weights associated with \code{ratios}.} \item{subset}{an optional logical expression indicating a subset of observations to be used in the modeling process. All observations are included by default.} \item{regformula}{an object of class \code{"\link[stats]{formula}"}: symbolic description of the regression component (see \code{\link[stats]{lm}} for details). No left hand side is needed in the formula; if present it is ignored. 
If \code{NULL}, no regression is done on the data.} \item{regdata}{an optional data frame, list or environment (or object coercible by \code{\link[base]{as.data.frame}} to a data frame) containing the variables in the regression model.} \item{adj.intercept}{if \code{TRUE}, the intercept of the regression model is located at the barycenter of the regressor instead of the origin.} \item{method}{estimation method for the variance components of the model; see Details.} \item{likelihood}{a character string giving the name of the likelihood function in one of the supported linear Bayes cases; see Details.} \item{tol}{tolerance level for the stopping criteria for iterative estimation method.} \item{maxit}{maximum number of iterations in iterative estimation method.} \item{echo}{logical; whether to echo the iterative procedure or not.} \item{x, object}{an object of class \code{"cm"}.} \item{levels}{character vector indicating the levels to predict or to include in the summary; if \code{NULL} all levels are included.} \item{newdata}{data frame containing the variables used to predict credibility regression models.} \item{\dots}{parameters of the prior distribution for \code{cm}; additional attributes to attach to the result for the \code{predict} and \code{summary} methods; further arguments to \code{\link[base]{format}} for the \code{print.summary} method; unused for the \code{print} method.} } \details{ \code{cm} is the unified front end for credibility models fitting. The function supports hierarchical models with any number of levels (with \enc{Bühlmann}{Buhlmann} and \enc{Bühlmann}{Buhlmann}-Straub models as special cases) and the regression model of Hachemeister. Usage of \code{cm} is similar to \code{\link[stats]{lm}} for these cases. \code{cm} can also fit linear Bayes models, in which case usage is much simplified; see the section on linear Bayes below. 
When not \code{"bayes"}, the \code{formula} argument symbolically describes the structure of the portfolio in the form \eqn{~ terms}. Each term is an interaction between risk factors contributing to the total variance of the portfolio data. Terms are separated by \code{+} operators and interactions within each term by \code{:}. For a portfolio divided first into sectors, then units and finally contracts, \code{formula} would be \code{~ sector + sector:unit + sector:unit:contract}, where \code{sector}, \code{unit} and \code{contract} are column names in \code{data}. In general, the formula should be of the form \code{~ a + a:b + a:b:c + a:b:c:d + ...}. If argument \code{regformula} is not \code{NULL}, the regression model of Hachemeister is fit to the data. The response is usually time. By default, the intercept of the model is located at time origin. If argument \code{adj.intercept} is \code{TRUE}, the intercept is moved to the (collective) barycenter of time, by orthogonalization of the design matrix. Note that the regression coefficients may be difficult to interpret in this case. Arguments \code{ratios}, \code{weights} and \code{subset} are used like arguments \code{select}, \code{select} and \code{subset}, respectively, of function \code{\link[base]{subset}}. Data does not have to be sorted by level. Nodes with no data (complete lines of \code{NA} except for the portfolio structure) are allowed, with the restriction mentioned above. The \code{print} methods use the option \code{deparse.cutoff} to control the printing of the call to \code{cm}. } \section{Hierarchical models}{ The credibility premium at one level is a convex combination between the linearly sufficient statistic of a node and the credibility premium of the level above. (For the first level, the complement of credibility is given to the collective premium.) 
The linearly sufficient statistic of a node is the credibility weighted average of the data of the node, except at the last level, where natural weights are used. The credibility factor of node \eqn{i} is equal to \deqn{\frac{w_i}{w_i + a/b},}{w[i]/(w[i] + a/b),} where \eqn{w_i}{w[i]} is the weight of the node used in the linearly sufficient statistic, \eqn{a} is the average within node variance and \eqn{b} is the average between node variance. } \section{Regression models}{ The credibility premium of node \eqn{i} is equal to \deqn{y^\prime b_i^a,}{y' ba[i],} where \eqn{y} is a matrix created from \code{newdata} and \eqn{b_i^a}{ba[i]} is the vector of credibility adjusted regression coefficients of node \eqn{i}. The latter is given by \deqn{b_i^a = Z_i b_i + (I - Z_I) m,}{ ba[i] = Z[i] b[i] + (I - Z[i]) m,} where \eqn{b_i}{b[i]} is the vector of regression coefficients based on data of node \eqn{i} only, \eqn{m} is the vector of collective regression coefficients, \eqn{Z_i}{Z[i]} is the credibility matrix and \eqn{I} is the identity matrix. The credibility matrix of node \eqn{i} is equal to \deqn{A^{-1} (A + s^2 S_i),}{A^(-1) (A + s2 S[i]),} where \eqn{S_i}{S[i]} is the unscaled regression covariance matrix of the node, \eqn{s^2}{s2} is the average within node variance and \eqn{A} is the within node covariance matrix. If the intercept is positioned at the barycenter of time, matrices \eqn{S_i}{S[i]} and \eqn{A} (and hence \eqn{Z_i}{Z[i]}) are diagonal. This amounts to use \enc{Bühlmann}{Buhlmann}-Straub models for each regression coefficient. Argument \code{newdata} provides the \dQuote{future} value of the regressors for prediction purposes. It should be given as specified in \code{\link[stats]{predict.lm}}. } \section{Variance components estimation}{ For hierarchical models, two sets of estimators of the variance components (other than the within node variance) are available: unbiased estimators and iterative estimators. 
Unbiased estimators are based on sums of squares of the form \deqn{B_i = \sum_j w_{ij} (X_{ij} - \bar{X}_i)^2 - (J - 1) a}{% B[i] = sum(j; w[ij] (X[ij] - Xb[i])^2 - (J - 1) a)}% and constants of the form \deqn{c_i = w_i - \sum_j \frac{w_{ij}^2}{w_i},}{% c[i] = w[i] - sum(j; w[ij]^2)/w[i],}% where \eqn{X_{ij}}{X[ij]} is the linearly sufficient statistic of level \eqn{(ij)}; \eqn{\bar{X_{i}}}{Xb[i]} is the weighted average of the latter using weights \eqn{w_{ij}}{w[ij]}; \eqn{w_i = \sum_j w_{ij}}{w[i] = sum(j; w[ij])}; \eqn{J} is the effective number of nodes at level \eqn{(ij)}; \eqn{a} is the within variance of this level. Weights \eqn{w_{ij}}{w[ij]} are the natural weights at the lowest level, the sum of the natural weights the next level and the sum of the credibility factors for all upper levels. The \enc{Bühlmann}{Buhlmann}-Gisler estimators (\code{method = "Buhlmann-Gisler"}) are given by% \deqn{b = \frac{1}{I} \sum_i \max \left( \frac{B_i}{c_i}, 0 \right),}{% b = mean(max(B[i]/c[i], 0)),}% that is the average of the per node variance estimators truncated at 0. The Ohlsson estimators (\code{method = "Ohlsson"}) are given by \deqn{b = \frac{\sum_i B_i}{\sum_i c_i},}{% b = sum(i; B[i]) / sum(i; c[i]),}% that is the weighted average of the per node variance estimators without any truncation. Note that negative estimates will be truncated to zero for credibility factor calculations. In the \enc{Bühlmann}{Buhlmann}-Straub model, these estimators are equivalent. Iterative estimators \code{method = "iterative"} are pseudo-estimators of the form \deqn{b = \frac{1}{d} \sum_i w_i (X_i - \bar{X})^2,}{% b = sum(i; w[i] * (X[i] - Xb)^2)/d,} where \eqn{X_i}{X[i]} is the linearly sufficient statistic of one level, \eqn{\bar{X}}{Xb} is the linearly sufficient statistic of the level above and \eqn{d} is the effective number of nodes at one level minus the effective number of nodes of the level above. The Ohlsson estimators are used as starting values. 
For regression models, with the intercept at time origin, only iterative estimators are available. If \code{method} is different from \code{"iterative"}, a warning is issued. With the intercept at the barycenter of time, the choice of estimators is the same as in the \enc{Bühlmann}{Buhlmann}-Straub model. } \section{Linear Bayes}{ When \code{formula} is \code{"bayes"}, the function computes pure Bayesian premiums for the following combinations of distributions where they are linear credibility premiums: \itemize{ \item{\eqn{X|\Theta = \theta \sim \mathrm{Poisson}(\theta)}{X|\Theta ~ Poisson(\Theta)} and \eqn{\Theta \sim \mathrm{Gamma}(\alpha, \lambda)}{\Theta ~ Gamma(\alpha, \lambda)};} \item{\eqn{X|\Theta = \theta \sim \mathrm{Exponential}(\theta)}{X|\Theta ~ Exponential(\Theta)} and \eqn{\Theta \sim \mathrm{Gamma}(\alpha, \lambda)}{\Theta ~ Gamma(\alpha, \lambda)};} \item{\eqn{X|\Theta = \theta \sim \mathrm{Gamma}(\tau, \theta)}{X|\Theta ~ Gamma(\tau, \Theta)} and \eqn{\Theta \sim \mathrm{Gamma}(\alpha, \lambda)}{\Theta ~ Gamma(\alpha, \lambda)};} \item{\eqn{X|\Theta = \theta \sim \mathrm{Normal}(\theta, \sigma_2^2)}{X|\Theta ~ Normal(\Theta, \sigma_2^2)} and \eqn{\Theta \sim \mathrm{Normal}(\mu, \sigma_1^2)}{\Theta ~ Normal(\mu, \sigma_1^2)};} \item{\eqn{X|\Theta = \theta \sim \mathrm{Bernoulli}(\theta)}{X|\Theta ~ Bernoulli(\Theta)} and \eqn{\Theta \sim \mathrm{Beta}(a, b)}{\Theta ~ Beta(a, b)};} \item{\eqn{X|\Theta = \theta \sim \mathrm{Binomial}(\nu, \theta)}{X|\Theta ~ Binomial(\nu, \Theta)} and \eqn{\Theta \sim \mathrm{Beta}(a, b)}{\Theta ~ Beta(a, b)};} \item{\eqn{X|\Theta = \theta \sim \mathrm{Geometric}(\theta)}{X|\Theta = \theta ~ Geometric(\theta)} and \eqn{\Theta \sim \mathrm{Beta}(a, b)}{\Theta ~ Beta(a, b)}.} \item{\eqn{X|\Theta = \theta \sim \mathrm{Negative~Binomial}(r, \theta)}{X|\Theta ~ Negative Binomial(r, \Theta)} and \eqn{\Theta \sim \mathrm{Beta}(a, b)}{\Theta ~ Beta(a, b)}.}} The following combination is also supported: \eqn{X|\Theta = 
\theta \sim \mathrm{Single~Parameter~Pareto}(\theta)}{X|\Theta ~ Single Parameter Pareto(\Theta)} and \eqn{\Theta \sim \mathrm{Gamma}(\alpha, \lambda)}{\Theta ~ Gamma(\alpha, \lambda)}. In this case, the Bayesian estimator not of the risk premium, but rather of parameter \eqn{\theta} is linear with a \dQuote{credibility} factor that is not restricted to \eqn{(0, 1)}. Argument \code{likelihood} identifies the distribution of \eqn{X|\Theta = \theta} as one of \code{"poisson"}, \code{"exponential"}, \code{"gamma"}, \code{"normal"}, \code{"bernoulli"}, \code{"binomial"}, \code{"geometric"}, \code{"negative binomial"} or \code{"pareto"}. The parameters of the distributions of \eqn{X|\Theta = \theta} (when needed) and \eqn{\Theta} are set in \code{\dots} using the argument names (and default values) of \code{\link[stats]{dgamma}}, \code{\link[stats]{dnorm}}, \code{\link[stats]{dbeta}}, \code{\link[stats]{dbinom}}, \code{\link[stats]{dnbinom}} or \code{dpareto1}, as appropriate. For the Gamma/Gamma case, use \code{shape.lik} for the shape parameter \eqn{\tau} of the Gamma likelihood. For the Normal/Normal case, use \code{sd.lik} for the standard error \eqn{\sigma_2} of the Normal likelihood. Data for the linear Bayes case may be a matrix or data frame as usual; an atomic vector to fit the model to a single contract; missing or \code{NULL} to fit the prior model. Arguments \code{ratios}, \code{weights} and \code{subset} are ignored. } \value{ Function \code{cm} computes the structure parameters estimators of the model specified in \code{formula}. The value returned is an object of class \code{cm}. 
An object of class \code{"cm"} is a list with at least the following components: \item{means}{a list containing, for each level, the vector of linearly sufficient statistics.} \item{weights}{a list containing, for each level, the vector of total weights.} \item{unbiased}{a vector containing the unbiased variance components estimators, or \code{NULL}.} \item{iterative}{a vector containing the iterative variance components estimators, or \code{NULL}.} \item{cred}{for multi-level hierarchical models: a list containing, the vector of credibility factors for each level. For one-level models: an array or vector of credibility factors.} \item{nodes}{a list containing, for each level, the vector of the number of nodes in the level.} \item{classification}{the columns of \code{data} containing the portfolio classification structure.} \item{ordering}{a list containing, for each level, the affiliation of a node to the node of the level above.} Regression fits have in addition the following components: \item{adj.models}{a list containing, for each node, the credibility adjusted regression model as obtained with \code{\link[stats]{lm.fit}} or \code{\link[stats]{lm.wfit}}.} \item{transition}{if \code{adj.intercept} is \code{TRUE}, a transition matrix from the basis of the orthogonal design matrix to the basis of the original design matrix.} \item{terms}{the \code{\link[stats]{terms}} object used.} The method of \code{predict} for objects of class \code{"cm"} computes the credibility premiums for the nodes of every level included in argument \code{levels} (all by default). Result is a list the same length as \code{levels} or the number of levels in \code{formula}, or an atomic vector for one-level models. } \references{ \enc{Bühlmann}{Buhlmann}, H. and Gisler, A. (2005), \emph{A Course in Credibility Theory and its Applications}, Springer. Belhadj, H., Goulet, V. and Ouellet, T. (2009), On parameter estimation in hierarchical credibility, \emph{Astin Bulletin} \bold{39}. 
Goulet, V. (1998), Principles and application of credibility theory, \emph{Journal of Actuarial Practice} \bold{6}, ISSN 1064-6647. Goovaerts, M. J. and Hoogstad, W. J. (1987), \emph{Credibility Theory}, Surveys of Actuarial Studies, No. 4, Nationale-Nederlanden N.V. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Xavier Milhaud, Tommy Ouellet, Louis-Philippe Pouliot } \seealso{ \code{\link[base]{subset}}, \code{\link[stats]{formula}}, \code{\link[stats]{lm}}, \code{\link[stats]{predict.lm}}. } \examples{ data(hachemeister) ## Buhlmann-Straub model fit <- cm(~state, hachemeister, ratios = ratio.1:ratio.12, weights = weight.1:weight.12) fit # print method predict(fit) # credibility premiums summary(fit) # more details ## Two-level hierarchical model. Notice that data does not have ## to be sorted by level X <- data.frame(unit = c("A", "B", "A", "B", "B"), hachemeister) fit <- cm(~unit + unit:state, X, ratio.1:ratio.12, weight.1:weight.12) predict(fit) predict(fit, levels = "unit") # unit credibility premiums only summary(fit) summary(fit, levels = "unit") # unit summaries only ## Regression model with intercept at time origin fit <- cm(~state, hachemeister, regformula = ~time, regdata = data.frame(time = 12:1), ratios = ratio.1:ratio.12, weights = weight.1:weight.12) fit predict(fit, newdata = data.frame(time = 0)) summary(fit, newdata = data.frame(time = 0)) ## Same regression model, with intercept at barycenter of time fit <- cm(~state, hachemeister, adj.intercept = TRUE, regformula = ~time, regdata = data.frame(time = 12:1), ratios = ratio.1:ratio.12, weights = weight.1:weight.12) fit predict(fit, newdata = data.frame(time = 0)) summary(fit, newdata = data.frame(time = 0)) ## Poisson/Gamma pure Bayesian model fit <- cm("bayes", data = c(5, 3, 0, 1, 1), likelihood = "poisson", shape = 3, rate = 3) fit predict(fit) summary(fit) ## Normal/Normal pure Bayesian model cm("bayes", data = c(5, 3, 0, 1, 1), likelihood = "normal", sd.lik = 2, mean = 2, sd = 
1) } \keyword{models} actuar/man/mean.grouped.data.Rd0000644000176200001440000000217615147745722016113 0ustar liggesusers\name{mean.grouped.data} \alias{mean.grouped.data} \title{Arithmetic Mean} \description{ Mean of grouped data objects. } \usage{ \method{mean}{grouped.data}(x, \dots) } \arguments{ \item{x}{an object of class \code{"grouped.data"}.} \item{\dots}{further arguments passed to or from other methods.} } \details{ The mean of grouped data with group boundaries \eqn{c_0, c_1, \dots, c_r}{c[0], c[1], \dots, c[r]} and group frequencies \eqn{n_1, \dots, n_r}{n[1], \dots, n[r]} is \deqn{\frac{1}{n} \sum_{j = 1}^r a_j n_j,}{% (1/n) * sum(j; a[j] * n[j]),} where \eqn{a_j = (c_{j - 1} + c_j)/2}{a[j] = (c[j - 1] + c[j])/2} is the midpoint of the \eqn{j}th interval, and \eqn{n = \sum_{j = 1}^r n_j}{n = sum(j; n[j])}. } \value{ A named vector of means. } \seealso{ \code{\link{grouped.data}} to create grouped data objects; \code{\link{emm}} to compute higher moments. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ data(gdental) mean(gdental) } \keyword{univar} actuar/man/gdental.Rd0000644000176200001440000000123215147745722014225 0ustar liggesusers\name{gdental} \docType{data} \alias{gdental} \title{Grouped Dental Claims Data Set} \description{ Grouped dental claims, that is presented in a number of claims per claim amount group form. } \usage{gdental} \format{ An object of class \code{"grouped.data"} (inheriting from class \code{"data.frame"}) consisting of 10 rows and 2 columns. The environment of the object contains the plain vector of \code{cj} of group boundaries } \source{Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. } \seealso{ \code{\link{grouped.data}} for a description of grouped data objects. 
} \keyword{datasets} actuar/man/InverseParalogistic.Rd0000644000176200001440000000725115147745722016573 0ustar liggesusers\name{InverseParalogistic} \alias{InverseParalogistic} \alias{dinvparalogis} \alias{pinvparalogis} \alias{qinvparalogis} \alias{rinvparalogis} \alias{minvparalogis} \alias{levinvparalogis} \title{The Inverse Paralogistic Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Inverse Paralogistic distribution with parameters \code{shape} and \code{scale}. } \usage{ dinvparalogis(x, shape, rate = 1, scale = 1/rate, log = FALSE) pinvparalogis(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qinvparalogis(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rinvparalogis(n, shape, rate = 1, scale = 1/rate) minvparalogis(order, shape, rate = 1, scale = 1/rate) levinvparalogis(limit, shape, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The inverse paralogistic distribution with parameters \code{shape} \eqn{= \tau}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\tau^2 (x/\theta)^{\tau^2}}{% x [1 + (x/\theta)^\tau]^{\tau + 1}}}{% f(x) = a^2 (x/s)^(a^2)/(x [1 + (x/s)^a]^(a + 1))} for \eqn{x > 0}, \eqn{\tau > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. 
The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{-\tau^2 < k < \tau}{-shape^2 < k < shape}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\tau^2}{k > -shape^2} and \eqn{1 - k/\tau}{1 - k/shape} not a negative integer. } \value{ \code{dinvparalogis} gives the density, \code{pinvparalogis} gives the distribution function, \code{qinvparalogis} gives the quantile function, \code{rinvparalogis} generates random deviates, \code{minvparalogis} gives the \eqn{k}th raw moment, and \code{levinvparalogis} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levinvparalogis} computes the limited expected value using \code{\link{betaint}}. See Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvparalogis(2, 3, 4, log = TRUE)) p <- (1:10)/10 pinvparalogis(qinvparalogis(p, 2, 3), 2, 3) ## first negative moment minvparalogis(-1, 2, 2) ## case with 1 - order/shape > 0 levinvparalogis(10, 2, 2, order = 1) ## case with 1 - order/shape < 0 levinvparalogis(10, 2/3, 2, order = 1) } \keyword{distribution} actuar/man/Gumbel.Rd0000644000176200001440000000641215147745722014027 0ustar liggesusers\name{Gumbel} \alias{Gumbel} \alias{dgumbel} \alias{pgumbel} \alias{qgumbel} \alias{rgumbel} \alias{mgumbel} \alias{mgfgumbel} \title{The Gumbel Distribution} \description{ Density function, distribution function, quantile function, random generation and raw moments for the Gumbel extreme value distribution with parameters \code{alpha} and \code{scale}. } \usage{ dgumbel(x, alpha, scale, log = FALSE) pgumbel(q, alpha, scale, lower.tail = TRUE, log.p = FALSE) qgumbel(p, alpha, scale, lower.tail = TRUE, log.p = FALSE) rgumbel(n, alpha, scale) mgumbel(order, alpha, scale) mgfgumbel(t, alpha, scale, log = FALSE) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{alpha}{location parameter.} \item{scale}{parameter. Must be strictly positive.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment. 
Only values \eqn{1} and \eqn{2} are supported.} \item{t}{numeric vector.} } \details{ The Gumbel distribution with parameters \code{alpha} \eqn{= \alpha}{= a} and \code{scale} \eqn{= \theta}{= s} has distribution function: \deqn{F(x) = \exp[-\exp(-(x - \alpha)/\theta)]}{% F(x) = exp[-exp(-(x - a)/s)],} for \eqn{-\infty < x < \infty}{-Inf < x < Inf}, \eqn{-\infty < a < \infty}{-Inf < a < Inf} and \eqn{\theta > 0}{s > 0}. The mode of the distribution is in \eqn{\alpha}{a}, the mean is \eqn{\alpha + \gamma\theta}{a + g * s}, where \eqn{\gamma}{g} \eqn{= 0.57721566} is the Euler-Mascheroni constant, and the variance is \eqn{\pi^2 \theta^2/6}{(pi * s)^2/6}. } \value{ \code{dgumbel} gives the density, \code{pgumbel} gives the distribution function, \code{qgumbel} gives the quantile function, \code{rgumbel} generates random deviates, \code{mgumbel} gives the \eqn{k}th raw moment, \eqn{k = 1, 2}, and \code{mgfgumbel} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ Distribution also known as the generalized extreme value distribution Type-I. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ dgumbel(c(-5, 0, 10, 20), 0.5, 2) p <- (1:10)/10 pgumbel(qgumbel(p, 2, 3), 2, 3) curve(pgumbel(x, 0.5, 2), from = -5, to = 20, col = "red") curve(pgumbel(x, 1.0, 2), add = TRUE, col = "green") curve(pgumbel(x, 1.5, 3), add = TRUE, col = "blue") curve(pgumbel(x, 3.0, 4), add = TRUE, col = "cyan") a <- 3; s <- 4 mgumbel(1, a, s) # mean a - s * digamma(1) # same mgumbel(2, a, s) - mgumbel(1, a, s)^2 # variance (pi * s)^2/6 # same } \keyword{distribution} actuar/man/FellerPareto.Rd0000644000176200001440000001405115147745722015176 0ustar liggesusers\name{FellerPareto} \alias{FellerPareto} \alias{dfpareto} \alias{pfpareto} \alias{qfpareto} \alias{rfpareto} \alias{mfpareto} \alias{levfpareto} \title{The Feller Pareto Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Feller Pareto distribution with parameters \code{min}, \code{shape1}, \code{shape2}, \code{shape3} and \code{scale}. } \usage{ dfpareto(x, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, log = FALSE) pfpareto(q, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qfpareto(p, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rfpareto(n, min, shape1, shape2, shape3, rate = 1, scale = 1/rate) mfpareto(order, min, shape1, shape2, shape3, rate = 1, scale = 1/rate) levfpareto(limit, min, shape1, shape2, shape3, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{min}{lower bound of the support of the distribution.} \item{shape1, shape2, shape3, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Feller-Pareto distribution with parameters \code{min} \eqn{= \mu}{= m}, \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \gamma}{= b}, \code{shape3} \eqn{= \tau}{= c} and \code{scale} \eqn{= \theta}{= s}, has density: \deqn{f(x) = \frac{\Gamma(\alpha + \tau)}{\Gamma(\alpha)\Gamma(\tau)} \frac{\gamma ((x - \mu)/\theta)^{\gamma \tau - 1}}{% \theta [1 + ((x - \mu)/\theta)^\gamma]^{\alpha + \tau}}}{% f(x) = Gamma(a + c)/(Gamma(a) * Gamma(c)) (b ((x - m)/s)^(bc - 1))/% (s [1 + ((x - m)/s)^b]^(a + c))} for \eqn{x > \mu}{x > m}, \eqn{-\infty < \mu < \infty}{-Inf < m < Inf}, \eqn{\alpha > 0}{a > 0}, \eqn{\gamma > 0}{b > 0}, \eqn{\tau > 0}{c > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) The Feller-Pareto is the distribution of the random variable \deqn{\mu + \theta \left(\frac{1 - X}{X}\right)^{1/\gamma},}{% m + s ((1 - X)/X)^(1/b),} where \eqn{X} has a beta distribution with parameters \eqn{\alpha}{a} and \eqn{\tau}{c}. The Feller-Pareto defines a large family of distributions encompassing the transformed beta family and many variants of the Pareto distribution. Setting \eqn{\mu = 0}{min = 0} yields the \link[=dtrbeta]{transformed beta} distribution. 
The Feller-Pareto distribution also has the following direct special cases: \itemize{ \item A \link[=dpareto4]{Pareto IV} distribution when \code{shape3 == 1}; \item A \link[=dpareto3]{Pareto III} distribution when \code{shape1 == shape3 == 1}; \item A \link[=dpareto2]{Pareto II} distribution when \code{shape2 == shape3 == 1}; \item A \link[=dpareto1]{Pareto I} distribution when \code{shape2 == shape3 == 1} and \code{min = scale}. } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} for nonnegative integer values of \eqn{k < \alpha\gamma}{k < shape1 * shape2}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} for nonnegative integer values of \eqn{k} and \eqn{\alpha - j/\gamma}{shape1 - j/shape2}, \eqn{j = 1, \dots, k} not a negative integer. Note that the range of admissible values for \eqn{k} in raw and limited moments is larger when \eqn{\mu = 0}{min == 0}. } \value{ \code{dfpareto} gives the density, \code{pfpareto} gives the distribution function, \code{qfpareto} gives the quantile function, \code{rfpareto} generates random deviates, \code{mfpareto} gives the \eqn{k}th raw moment, and \code{levfpareto} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levfpareto} computes the limited expected value using \code{\link{betaint}}. For the Feller-Pareto and other Pareto distributions, we use the classification of Arnold (2015) with the parametrization of Klugman et al. (2012). The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Dutang, C., Goulet, V., Langevin, N. (2022). Feller-Pareto and Related Distributions: Numerical Implementation and Actuarial Applications. \emph{Journal of Statistical Software}, \bold{103}(6), 1--22. \doi{10.18637/jss.v103.i06}. 
Abramowitz, M. and Stegun, I. A. (1972), \emph{Handbook of Mathematical Functions}, Dover. Arnold, B. C. (2015), \emph{Pareto Distributions}, Second Edition, CRC Press. Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dtrbeta}} for the transformed beta distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Nicholas Langevin } \examples{ exp(dfpareto(2, 1, 2, 3, 4, 5, log = TRUE)) p <- (1:10)/10 pfpareto(qfpareto(p, 1, 2, 3, 4, 5), 1, 2, 3, 4, 5) ## variance mfpareto(2, 1, 2, 3, 4, 5) - mfpareto(1, 1, 2, 3, 4, 5)^2 ## case with shape1 - order/shape2 > 0 levfpareto(10, 1, 2, 3, 4, scale = 1, order = 2) ## case with shape1 - order/shape2 < 0 levfpareto(20, 10, 0.1, 14, 2, scale = 1.5, order = 2) } \keyword{distribution} actuar/man/GeneralizedPareto.Rd0000644000176200001440000001210115147745722016210 0ustar liggesusers\name{GeneralizedPareto} \alias{GeneralizedPareto} \alias{dgenpareto} \alias{pgenpareto} \alias{qgenpareto} \alias{rgenpareto} \alias{mgenpareto} \alias{levgenpareto} \title{The Generalized Pareto Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Generalized Pareto distribution with parameters \code{shape1}, \code{shape2} and \code{scale}. 
} \usage{ dgenpareto(x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) pgenpareto(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qgenpareto(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rgenpareto(n, shape1, shape2, rate = 1, scale = 1/rate) mgenpareto(order, shape1, shape2, rate = 1, scale = 1/rate) levgenpareto(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The Generalized Pareto distribution with parameters \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \tau}{= b} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\Gamma(\alpha + \tau)}{\Gamma(\alpha)\Gamma(\tau)} \frac{\theta^\alpha x^{\tau - 1}}{% (x + \theta)^{\alpha + \tau}}}{% f(x) = Gamma(a + b)/(Gamma(a) * Gamma(b)) * (s^a x^(b - 1))/(x + s)^(a + b)} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0}, \eqn{\tau > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) The Generalized Pareto is the distribution of the random variable \deqn{\theta \left(\frac{X}{1 - X}\right),}{\theta (X/(1 - X)),} where \eqn{X} has a beta distribution with parameters \eqn{\alpha} and \eqn{\tau}. 
The Generalized Pareto distribution has the following special cases: \itemize{ \item A \link[=dpareto]{Pareto} distribution when \code{shape2 == 1}; \item An \link[=dinvpareto]{Inverse Pareto} distribution when \code{shape1 == 1}. } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}, \eqn{-\tau < k < \alpha}{-shape2 < k < shape1}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\tau}{k > -shape2} and \eqn{\alpha - k}{shape1 - k} not a negative integer. } \value{ \code{dgenpareto} gives the density, \code{pgenpareto} gives the distribution function, \code{qgenpareto} gives the quantile function, \code{rgenpareto} generates random deviates, \code{mgenpareto} gives the \eqn{k}th raw moment, and \code{levgenpareto} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levgenpareto} computes the limited expected value using \code{\link{betaint}}. Distribution also known as the Beta of the Second Kind. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The Generalized Pareto distribution defined here is different from the one in Embrechts et al. (1997) and in \href{https://en.wikipedia.org/wiki/Generalized_Pareto_distribution}{Wikipedia}; see also Kleiber and Kotz (2003, section 3.12). One may most likely compute quantities for the latter using functions for the \link[=dpareto]{Pareto} distribution with the appropriate change of parametrization. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Embrechts, P., Klüppelberg, C. and Mikisch, T. (1997), \emph{Modelling Extremal Events for Insurance and Finance}, Springer. Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. 
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dgenpareto(3, 3, 4, 4, log = TRUE)) p <- (1:10)/10 pgenpareto(qgenpareto(p, 3, 3, 1), 3, 3, 1) qgenpareto(.3, 3, 4, 4, lower.tail = FALSE) ## variance mgenpareto(2, 3, 2, 1) - mgenpareto(1, 3, 2, 1)^2 ## case with shape1 - order > 0 levgenpareto(10, 3, 3, scale = 1, order = 2) ## case with shape1 - order < 0 levgenpareto(10, 1.5, 3, scale = 1, order = 2) } \keyword{distribution} actuar/man/Loglogistic.Rd0000644000176200001440000000705715147745722015101 0ustar liggesusers\name{Loglogistic} \alias{Loglogistic} \alias{dllogis} \alias{pllogis} \alias{qllogis} \alias{rllogis} \alias{mllogis} \alias{levllogis} \title{The Loglogistic Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Loglogistic distribution with parameters \code{shape} and \code{scale}. } \usage{ dllogis(x, shape, rate = 1, scale = 1/rate, log = FALSE) pllogis(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qllogis(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rllogis(n, shape, rate = 1, scale = 1/rate) mllogis(order, shape, rate = 1, scale = 1/rate) levllogis(limit, shape, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The loglogistic distribution with parameters \code{shape} \eqn{= \gamma}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\gamma (x/\theta)^\gamma}{% x [1 + (x/\theta)^\gamma]^2}}{% f(x) = a (x/s)^a / (x [1 + (x/s)^a]^2)} for \eqn{x > 0}, \eqn{\gamma > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}, \eqn{-\gamma < k < \gamma}{-shape < k < shape}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\gamma}{k > -shape} and \eqn{1 - k/\gamma}{1 - k/shape} not a negative integer. } \value{ \code{dllogis} gives the density, \code{pllogis} gives the distribution function, \code{qllogis} gives the quantile function, \code{rllogis} generates random deviates, \code{mllogis} gives the \eqn{k}th raw moment, and \code{levllogis} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levllogis} computes the limited expected value using \code{\link{betaint}}. Also known as the Fisk distribution. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. 
(2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dpareto3}} for an equivalent distribution with a location parameter. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dllogis(2, 3, 4, log = TRUE)) p <- (1:10)/10 pllogis(qllogis(p, 2, 3), 2, 3) ## mean mllogis(1, 2, 3) ## case with 1 - order/shape > 0 levllogis(10, 2, 3, order = 1) ## case with 1 - order/shape < 0 levllogis(10, 2/3, 3, order = 1) } \keyword{distribution} actuar/man/InverseTransformedGamma.Rd0000644000176200001440000001116515147745722017400 0ustar liggesusers\name{InverseTransformedGamma} \alias{InverseTransformedGamma} \alias{dinvtrgamma} \alias{pinvtrgamma} \alias{qinvtrgamma} \alias{rinvtrgamma} \alias{minvtrgamma} \alias{levinvtrgamma} \title{The Inverse Transformed Gamma Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments, and limited moments for the Inverse Transformed Gamma distribution with parameters \code{shape1}, \code{shape2} and \code{scale}. } \usage{ dinvtrgamma(x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) pinvtrgamma(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qinvtrgamma(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rinvtrgamma(n, shape1, shape2, rate = 1, scale = 1/rate) minvtrgamma(order, shape1, shape2, rate = 1, scale = 1/rate) levinvtrgamma(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The inverse transformed gamma distribution with parameters \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \tau}{= b} and \code{scale} \eqn{= \theta}{= s}, has density: \deqn{f(x) = \frac{\tau u^\alpha e^{-u}}{x \Gamma(\alpha)}, % \quad u = (\theta/x)^\tau}{% f(x) = b u^a exp(-u) / (x Gamma(a)), u = (s/x)^b} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0}, \eqn{\tau > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) The inverse transformed gamma is the distribution of the random variable \eqn{\theta X^{-1/\tau},}{s X^(-1/b),} where \eqn{X} has a gamma distribution with shape parameter \eqn{\alpha}{a} and scale parameter \eqn{1} or, equivalently, of the random variable \eqn{Y^{-1/\tau}}{Y^(-1/b)} with \eqn{Y} a gamma distribution with shape parameter \eqn{\alpha}{a} and scale parameter \eqn{\theta^{-\tau}}{s^(-b)}. The inverse transformed gamma distribution defines a family of distributions with the following special cases: \itemize{ \item An \link[=dinvgamma]{Inverse Gamma} distribution when \code{shape2 == 1}; \item An \link[=dinvweibull]{Inverse Weibull} distribution when \code{shape1 == 1}; \item An \link[=dinvexp]{Inverse Exponential} distribution when \code{shape1 == shape2 == 1}; } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{k < \alpha\tau}{k < shape1 * shape2}, and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} for all \eqn{k}. 
} \value{ \code{dinvtrgamma} gives the density, \code{pinvtrgamma} gives the distribution function, \code{qinvtrgamma} gives the quantile function, \code{rinvtrgamma} generates random deviates, \code{minvtrgamma} gives the \eqn{k}th raw moment, and \code{levinvtrgamma} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levinvtrgamma} computes the limited expected value using \code{gammainc} from package \pkg{expint}. Distribution also known as the Inverse Generalized Gamma. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvtrgamma(2, 3, 4, 5, log = TRUE)) p <- (1:10)/10 pinvtrgamma(qinvtrgamma(p, 2, 3, 4), 2, 3, 4) minvtrgamma(2, 3, 4, 5) levinvtrgamma(200, 3, 4, 5, order = 2) } \keyword{distribution} actuar/man/GeneralizedBeta.Rd0000644000176200001440000001002115147745722015630 0ustar liggesusers\name{GeneralizedBeta} \alias{GeneralizedBeta} \alias{dgenbeta} \alias{pgenbeta} \alias{qgenbeta} \alias{rgenbeta} \alias{mgenbeta} \alias{levgenbeta} \title{The Generalized Beta Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Generalized Beta distribution with parameters \code{shape1}, \code{shape2}, \code{shape3} and \code{scale}. 
} \usage{ dgenbeta(x, shape1, shape2, shape3, rate = 1, scale = 1/rate, log = FALSE) pgenbeta(q, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qgenbeta(p, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rgenbeta(n, shape1, shape2, shape3, rate = 1, scale = 1/rate) mgenbeta(order, shape1, shape2, shape3, rate = 1, scale = 1/rate) levgenbeta(limit, shape1, shape2, shape3, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, shape3, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The generalized beta distribution with parameters \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \beta}{= b}, \code{shape3} \eqn{= \tau}{= c} and \code{scale} \eqn{= \theta}{= s}, has density: \deqn{f(x) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha)\Gamma(\beta)} (x/\theta)^{\alpha \tau} (1 - (x/\theta)^\tau)^{\beta - 1} \frac{\tau}{x}}{% f(x) = Gamma(a + b)/(Gamma(a) * Gamma(b)) (c (x/s)^(ac) [1 - (x/s)^c]^(b - 1))/x} for \eqn{0 < x < \theta}{0 < x < s}, \eqn{\alpha > 0}{a > 0}, \eqn{\beta > 0}{b > 0}, \eqn{\tau > 0}{c > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) 
The generalized beta is the distribution of the random variable \deqn{\theta X^{1/\tau},}{s X^(1/c),} where \eqn{X} has a beta distribution with parameters \eqn{\alpha}{a} and \eqn{\beta}{b}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\alpha\tau}{k > -shape1 * shape3}. } \value{ \code{dgenbeta} gives the density, \code{pgenbeta} gives the distribution function, \code{qgenbeta} gives the quantile function, \code{rgenbeta} generates random deviates, \code{mgenbeta} gives the \eqn{k}th raw moment, and \code{levgenbeta} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ This is \emph{not} the generalized three-parameter beta distribution defined on page 251 of Johnson et al, 1995. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Johnson, N. L., Kotz, S. and Balakrishnan, N. (1995) \emph{Continuous Univariate Distributions, Volume 2}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ exp(dgenbeta(2, 2, 3, 4, 0.2, log = TRUE)) p <- (1:10)/10 pgenbeta(qgenbeta(p, 2, 3, 4, 0.2), 2, 3, 4, 0.2) mgenbeta(2, 1, 2, 3, 0.25) - mgenbeta(1, 1, 2, 3, 0.25) ^ 2 levgenbeta(10, 1, 2, 3, 0.25, order = 2) } \keyword{distribution} actuar/man/VaR.Rd0000644000176200001440000000105315147745722013300 0ustar liggesusers\name{VaR} \alias{VaR} \title{Value at Risk} \description{ Value at Risk. 
} \usage{ VaR(x, \dots) } \arguments{ \item{x}{an \R object.} \item{\dots}{further arguments passed to or from other methods.} } \details{ This is a generic function with, currently, only a method for objects of class \code{"aggregateDist"}. } \value{ An object of class \code{numeric}. } \seealso{ \code{\link{VaR.aggregateDist}}, \code{\link{aggregateDist}} } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Tommy Ouellet } \keyword{univar} actuar/man/quantile.grouped.data.Rd0000644000176200001440000000410415147745722017006 0ustar liggesusers\name{quantile.grouped.data} \alias{quantile.grouped.data} \alias{summary.grouped.data} \title{Quantiles of Grouped Data} \description{ Sample quantiles corresponding to the given probabilities for objects of class \code{"grouped.data"}. } \usage{ \method{quantile}{grouped.data}(x, probs = seq(0, 1, 0.25), names = TRUE, \dots) \method{summary}{grouped.data}(object, \dots) } \arguments{ \item{x, object}{an object of class \code{"grouped.data"}.} \item{probs}{numeric vector of probabilities with values in \eqn{[0, 1]}.} \item{names}{logical; if true, the result has a \code{names} attribute. Set to \code{FALSE} for speedup with many \code{probs}.} \item{\dots}{further arguments passed to or from other methods.} } \details{ The quantile function is the inverse of the ogive, that is a linear interpolation of the empirical quantile function. The equation of the quantile function is \deqn{x = \frac{c_j (F_n(c_{j - 1}) - q) + c_{j - 1} (q - F_n(c_j))}{F_n(c_j) - F_n(c_{j - 1})}}{% x = (c[j] (Fn(c[j-1]) - q) + c[j-1] (q - Fn(c[j])))/(Fn(c[j]) - Fn(c[j-1]))} for \eqn{0 \leq q \leq 1}{0 <= q <= 1} and where \eqn{c_0, \dots, c_r}{c[0], \dots, c[r]} are the \eqn{r + 1} group boundaries and \eqn{F_n}{Fn} is the empirical distribution function of the sample. } \value{ For \code{quantile}, a numeric vector, named if \code{names} is \code{TRUE}. 
For the \code{summary} method, an object of class \code{c("summaryDefault", "\link{table}")} which has specialized \code{\link{format}} and \code{\link{print}} methods. } \seealso{ \code{\link{ogive}} for the smoothed empirical distribution of which \code{quantile.grouped.data} is an inverse; \code{\link{mean.grouped.data}} and \code{\link{var.grouped.data}} to compute the mean and variance of grouped data. \code{\link{grouped.data}} to create grouped data objects. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ data(gdental) quantile(gdental) summary(gdental) Fn <- ogive(gdental) Fn(quantile(gdental)) # inverse function } \keyword{univar} actuar/man/Extract.grouped.data.Rd0000644000176200001440000000511015147745722016574 0ustar liggesusers\name{Extract.grouped.data} \alias{Extract.grouped.data} \alias{[.grouped.data} \alias{[<-.grouped.data} \title{Extract or Replace Parts of a Grouped Data Object} \description{ Extract or replace subsets of grouped data objects. } \usage{ \method{[}{grouped.data}(x, i, j) \method{[}{grouped.data}(x, i, j) <- value } \arguments{ \item{x}{an object of class \code{grouped.data}.} \item{i, j}{elements to extract or replace. \code{i, j} are \code{numeric} or \code{character} or, for \code{[} only, empty. Numeric values are coerced to integer as if by \code{\link[base]{as.integer}}. 
For replacement by \code{[}, a logical matrix is allowed, but not replacement in the group boundaries and group frequencies simultaneously.} \item{value}{a suitable replacement value.} } \details{ Objects of class \code{"grouped.data"} can mostly be indexed like data frames, with the following restrictions: \enumerate{ \item For \code{[}, the extracted object must keep a group boundaries column and at least one group frequencies column to remain of class \code{"grouped.data"}; \item For \code{[<-}, it is not possible to replace group boundaries and group frequencies simultaneously; \item When replacing group boundaries, \code{length(value) == length(i) + 1}. } \code{x[, 1]} will return the plain vector of group boundaries. Replacement of non adjacent group boundaries is not possible for obvious reasons. Otherwise, extraction and replacement should work just like for data frames. } \value{ For \code{[} an object of class \code{"grouped.data"}, a data frame or a vector. For \code{[<-} an object of class \code{"grouped.data"}. } \note{ Currently \code{[[}, \code{[[<-}, \code{$} and \code{$<-} are not specifically supported, but should work as usual on group frequency columns. } \seealso{ \code{\link[base]{[.data.frame}} for extraction and replacement methods of data frames, \code{\link{grouped.data}} to create grouped data objects. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ data(gdental) (x <- gdental[1]) # select column 1 class(x) # no longer a grouped.data object class(gdental[2]) # same gdental[, 1] # group boundaries gdental[, 2] # group frequencies gdental[1:4,] # a subset gdental[c(1, 3, 5),] # avoid this gdental[1:2, 1] <- c(0, 30, 60) # modified boundaries gdental[, 2] <- 10 # modified frequencies \dontrun{gdental[1, ] <- 2} # not allowed } \keyword{manip} \keyword{array} actuar/man/rcomphierarc.summaries.Rd0000644000176200001440000001223615147745722017277 0ustar liggesusers\name{rcomphierarc.summaries} \alias{rcomphierarc.summaries} \alias{aggregate.portfolio} \alias{frequency.portfolio} \alias{severity.portfolio} \alias{weights.portfolio} \title{Summary Statistics of a Portfolio} \description{ Methods for \link[base]{class} \code{"portfolio"} objects. \code{aggregate} splits portfolio data into subsets and computes summary statistics for each. \code{frequency} computes the frequency of claims for subsets of portfolio data. \code{severity} extracts the individual claim amounts. \code{weights} extracts the matrix of weights. } \usage{ \method{aggregate}{portfolio}(x, by = names(x$nodes), FUN = sum, classification = TRUE, prefix = NULL, \dots) \method{frequency}{portfolio}(x, by = names(x$nodes), classification = TRUE, prefix = NULL, \dots) \method{severity}{portfolio}(x, by = head(names(x$node), -1), splitcol = NULL, classification = TRUE, prefix = NULL, \dots) \method{weights}{portfolio}(object, classification = TRUE, prefix = NULL, \dots) } \arguments{ \item{x, object}{an object of class \code{"portfolio"}, typically created with \code{\link{simul}}.} \item{by}{character vector of grouping elements using the level names of the portfolio in \code{x}. 
The names can be abbreviated.} \item{FUN}{the function to be applied to data subsets.} \item{classification}{boolean; if \code{TRUE}, the node identifier columns are included in the output.} \item{prefix}{characters to prefix column names with; if \code{NULL}, sensible defaults are used when appropriate.} \item{splitcol}{columns of the data matrix to extract separately; usual matrix indexing methods are supported.} \item{\dots}{optional arguments to \code{FUN}, or passed to or from other methods.} } \details{ By default, \code{aggregate.portfolio} computes the aggregate claim amounts for the grouping specified in \code{by}. Any other statistic based on the individual claim amounts can be used through argument \code{FUN}. \code{frequency.portfolio} is equivalent to using \code{aggregate.portfolio} with argument \code{FUN} equal to \code{if (identical(x, NA)) NA else length(x)}. \code{severity.portfolio} extracts individual claim amounts of a portfolio by groupings using the default method of \code{\link{severity}}. Argument \code{splitcol} allows to get the individual claim amounts of specific columns separately. \code{weights.portfolio} extracts the weight matrix of a portfolio. } \value{ A matrix or vector depending on the groupings specified in \code{by}. For the \code{aggregate} and \code{frequency} methods: if at least one level other than the last one is used for grouping, the result is a matrix obtained by binding the appropriate node identifiers extracted from \code{x$classification} if \code{classification = TRUE}, and the summaries per grouping. If the last level is used for grouping, the column names of \code{x$data} are retained; if the last level is not used for grouping, the column name is replaced by the deparsed name of \code{FUN}. If only the last level is used (column summaries), a named vector is returned. 
For the \code{severity} method: a list of two elements: \item{main}{\code{NULL} or a matrix of claim amounts for the columns not specified in \code{splitcol}, with the appropriate node identifiers extracted from \code{x$classification} if \code{classification = TRUE};} \item{split}{same as above, but for the columns specified in \code{splitcol}.} For the \code{weights} method: the weight matrix of the portfolio with node identifiers if \code{classification = TRUE}. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Louis-Philippe Pouliot. } \seealso{ \code{\link{rcomphierarc}} } \examples{ nodes <- list(sector = 3, unit = c(3, 4), employer = c(3, 4, 3, 4, 2, 3, 4), year = 5) model.freq <- expression(sector = rexp(1), unit = rexp(sector), employer = rgamma(unit, 1), year = rpois(employer)) model.sev <- expression(sector = rnorm(6, 0.1), unit = rnorm(sector, 1), employer = rnorm(unit, 1), year = rlnorm(employer, 1)) pf <- rcomphierarc(nodes, model.freq, model.sev) aggregate(pf) # aggregate claim amount by employer and year aggregate(pf, classification = FALSE) # same, without node identifiers aggregate(pf, by = "sector") # by sector aggregate(pf, by = "y") # by year aggregate(pf, by = c("s", "u"), mean) # average claim amount frequency(pf) # number of claims frequency(pf, prefix = "freq.") # more explicit column names severity(pf) # claim amounts by row severity(pf, by = "year") # by column severity(pf, by = c("s", "u")) # by unit severity(pf, splitcol = "year.5") # last year separate severity(pf, splitcol = 5) # same severity(pf, splitcol = c(FALSE, FALSE, FALSE, FALSE, TRUE)) # same weights(pf) ## For portfolios with weights, the following computes loss ratios. 
\dontrun{aggregate(pf, classif = FALSE) / weights(pf, classif = FALSE)} } \keyword{models} \keyword{methods} actuar/man/CTE.Rd0000644000176200001440000000522615147745722013231 0ustar liggesusers\name{CTE} \alias{CTE} \alias{TVaR} \alias{CTE.aggregateDist} \title{Conditional Tail Expectation} \description{ Conditional Tail Expectation, also called Tail Value-at-Risk. \code{TVaR} is an alias for \code{CTE}. } \usage{ CTE(x, \dots) \method{CTE}{aggregateDist}(x, conf.level = c(0.9, 0.95, 0.99), names = TRUE, \dots) TVaR(x, \dots) } \arguments{ \item{x}{an \R object.} \item{conf.level}{numeric vector of probabilities with values in \eqn{[0, 1)}.} \item{names}{logical; if true, the result has a \code{names} attribute. Set to \code{FALSE} for speedup with many \code{probs}.} \item{\dots}{further arguments passed to or from other methods.} } \details{ The Conditional Tail Expectation (or Tail Value-at-Risk) measures the average of losses above the Value at Risk for some given confidence level, that is \eqn{E[X|X > \mathrm{VaR}(X)]} where \eqn{X} is the loss random variable. \code{CTE} is a generic function with, currently, only a method for objects of class \code{"aggregateDist"}. For the recursive, convolution and simulation methods of \code{\link{aggregateDist}}, the CTE is computed from the definition using the empirical cdf. For the normal approximation method, an explicit formula exists: \deqn{\mu + \frac{\sigma}{(1 - \alpha) \sqrt{2 \pi}} e^{-\mathrm{VaR}(X)^2/2},}{% m + s exp(-VaR(X)^2/2)/((1 - a) * sqrt(2 pi)),} where \eqn{\mu}{m} is the mean, \eqn{\sigma}{s} the standard deviation and \eqn{\alpha}{a} the confidence level. For the Normal Power approximation, the explicit formula given in Castañer et al. 
(2013) is \deqn{\mu + \frac{\sigma}{(1 - \alpha) \sqrt{2 \pi}} e^{-\mathrm{VaR}(X)^2/2} \left( 1 + \frac{\gamma}{6} \mathrm{VaR}(X) \right),}{% m + s exp(-VaR(X)^2/2)/((1 - a) * sqrt(2 pi)) (1 + g * VaR(X)/6),} where, as above, \eqn{\mu}{m} is the mean, \eqn{\sigma}{s} the standard deviation, \eqn{\alpha}{a} the confidence level and \eqn{\gamma}{g} is the skewness. } \value{ A numeric vector, named if \code{names} is \code{TRUE}. } \seealso{ \code{\link{aggregateDist}}; \code{\link{VaR}} } \references{ Castañer, A. and Claramunt, M.M. and Mármol, M. (2013), Tail value at risk. An analysis with the Normal-Power approximation. In \emph{Statistical and Soft Computing Approaches in Insurance Problems}, pp. 87-112. Nova Science Publishers, 2013. ISBN 978-1-62618-506-7. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Tommy Ouellet } \examples{ model.freq <- expression(data = rpois(7)) model.sev <- expression(data = rnorm(9, 2)) Fs <- aggregateDist("simulation", model.freq, model.sev, nb.simul = 1000) CTE(Fs) } \keyword{univar} actuar/man/NormalSupp.Rd0000644000176200001440000000250615147745722014714 0ustar liggesusers\name{NormalSupp} \alias{NormalSupp} \alias{mnorm} \alias{mgfnorm} \title{Moments and Moment generating function of the Normal Distribution} \description{ Raw moments and moment generating function for the normal distribution with mean equal to \code{mean} and standard deviation equal to \code{sd}. } \usage{ mnorm(order, mean = 0, sd = 1) mgfnorm(t, mean = 0, sd = 1, log = FALSE) } \arguments{ \item{order}{vector of integers; order of the moment.} \item{mean}{vector of means.} \item{sd}{vector of standard deviations.} \item{t}{numeric vector.} \item{log}{logical; if \code{TRUE}, the cumulant generating function is returned.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the moment generating function is \eqn{E[e^{tX}]}. Only integer moments are supported. 
} \value{ \code{mnorm} gives the \eqn{k}th raw moment and \code{mgfnorm} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link{Normal}} } \references{ Johnson, N. L. and Kotz, S. (1970), \emph{Continuous Univariate Distributions, Volume 1}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Christophe Dutang } \examples{ mgfnorm(0:4,1,2) mnorm(3) } \keyword{distribution} actuar/man/ExponentialSupp.Rd0000644000176200001440000000326515147745722015755 0ustar liggesusers\name{ExponentialSupp} \alias{ExponentialSupp} \alias{mexp} \alias{levexp} \alias{mgfexp} \title{Moments and Moment Generating Function of the Exponential Distribution} \description{ Raw moments, limited moments and moment generating function for the exponential distribution with rate \code{rate} (i.e., mean \code{1/rate}). } \usage{ mexp(order, rate = 1) levexp(limit, rate = 1, order = 1) mgfexp(t, rate = 1, log = FALSE) } \arguments{ \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{rate}{vector of rates.} \item{t}{numeric vector.} \item{log}{logical; if \code{TRUE}, the cumulant generating function is returned.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} and the moment generating function is \eqn{E[e^{tX}]}, \eqn{k > -1}. } \value{ \code{mexp} gives the \eqn{k}th raw moment, \code{levexp} gives the \eqn{k}th moment of the limited loss variable, and \code{mgfexp} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link[stats]{Exponential}} } \references{ Johnson, N. L. and Kotz, S. (1970), \emph{Continuous Univariate Distributions, Volume 1}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. 
(2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Christophe Dutang and Mathieu Pigeon. } \examples{ mexp(2, 3) - mexp(1, 3)^2 levexp(10, 3, order = 2) mgfexp(1,2) } \keyword{distribution} actuar/man/Logarithmic.Rd0000644000176200001440000001044515147745722015057 0ustar liggesusers\name{Logarithmic} \alias{Logarithmic} \alias{dlogarithmic} \alias{plogarithmic} \alias{qlogarithmic} \alias{rlogarithmic} \alias{log-series} \title{The Logarithmic Distribution} \description{ Density function, distribution function, quantile function and random generation for the Logarithmic (or log-series) distribution with parameter \code{prob}. } \usage{ dlogarithmic(x, prob, log = FALSE) plogarithmic(q, prob, lower.tail = TRUE, log.p = FALSE) qlogarithmic(p, prob, lower.tail = TRUE, log.p = FALSE) rlogarithmic(n, prob) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{prob}{parameter. \code{0 <= prob < 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The logarithmic (or log-series) distribution with parameter \code{prob} \eqn{= \theta}{= p} has probability mass function \deqn{% p(x) = \frac{a \theta^x}{x},}{% p(x) = a p^x / x,} with \eqn{a = -1/\log(1 - \theta)}{a = -1/log(1-p)} and for \eqn{x = 1, 2, \ldots}, \eqn{0 \le \theta < 1}{0 \le p < 1}. The logarithmic distribution is the limiting case of the zero-truncated negative binomial distribution with \code{size} parameter equal to \eqn{0}. 
Note that in this context, parameter \code{prob} generally corresponds to the probability of \emph{failure} of the zero-truncated negative binomial. If an element of \code{x} is not integer, the result of \code{dlogarithmic} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{F(x) \ge p}, where \eqn{F} is the distribution function. } \value{ \code{dlogarithmic} gives the probability mass function, \code{plogarithmic} gives the distribution function, \code{qlogarithmic} gives the quantile function, and \code{rlogarithmic} generates random deviates. Invalid \code{prob} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rlogarithmic}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ \code{qlogarithmic} is based on \code{qbinom} et al.; it uses the Cornish--Fisher Expansion to include a skewness correction to a normal approximation, followed by a search. \code{rlogarithmic} is an implementation of the LS and LK algorithms of Kemp (1981) with automatic selection. As suggested by Devroye (1986), the LS algorithm is used when \code{prob < 0.95}, and the LK algorithm otherwise. } \references{ Johnson, N. L., Kemp, A. W. and Kotz, S. (2005), \emph{Univariate Discrete Distributions, Third Edition}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. Kemp, A. W. (1981), \dQuote{Efficient Generation of Logarithmically Distributed Pseudo-Random Variables}, \emph{Journal of the Royal Statistical Society, Series C}, vol. 30, p. 249-253. Devroye, L. (1986), \emph{Non-Uniform Random Variate Generation}, Springer-Verlag. \url{https://luc.devroye.org/rnbookindex.html} } \seealso{ \code{\link{dztnbinom}} for the zero-truncated negative binomial distribution. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ ## Table 1 of Kemp (1981) [also found in Johnson et al. (2005), chapter 7] p <- c(0.1, 0.3, 0.5, 0.7, 0.8, 0.85, 0.9, 0.95, 0.99, 0.995, 0.999, 0.9999) round(rbind(dlogarithmic(1, p), dlogarithmic(2, p), plogarithmic(9, p, lower.tail = FALSE), -p/((1 - p) * log(1 - p))), 2) qlogarithmic(plogarithmic(1:10, 0.9), 0.9) x <- rlogarithmic(1000, 0.8) y <- sort(unique(x)) plot(y, table(x)/length(x), type = "h", lwd = 2, pch = 19, col = "black", xlab = "x", ylab = "p(x)", main = "Empirical vs theoretical probabilities") points(y, dlogarithmic(y, prob = 0.8), pch = 19, col = "red") legend("topright", c("empirical", "theoretical"), lty = c(1, NA), pch = c(NA, 19), col = c("black", "red")) } \keyword{distribution} actuar/man/BetaMoments.Rd0000644000176200001440000000265215147745722015034 0ustar liggesusers\name{BetaMoments} \alias{BetaMoments} \alias{mbeta} \alias{levbeta} \title{Raw and Limited Moments of the Beta Distribution} \description{ Raw moments and limited moments for the (central) Beta distribution with parameters \code{shape1} and \code{shape2}. } \usage{ mbeta(order, shape1, shape2) levbeta(limit, shape1, shape2, order = 1) } \arguments{ \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{shape1, shape2}{positive parameters of the Beta distribution.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\alpha}{k > -shape1}. The noncentral beta distribution is not supported. } \value{ \code{mbeta} gives the \eqn{k}th raw moment and \code{levbeta} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link[stats]{Beta}} for details on the beta distribution and functions \code{[dpqr]beta}. } \references{ Klugman, S. 
A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ mbeta(2, 3, 4) - mbeta(1, 3, 4)^2 levbeta(10, 3, 4, order = 2) } \keyword{distribution} actuar/man/InverseGamma.Rd0000644000176200001440000000750715147745722015200 0ustar liggesusers\name{InverseGamma} \alias{InverseGamma} \alias{dinvgamma} \alias{pinvgamma} \alias{qinvgamma} \alias{rinvgamma} \alias{minvgamma} \alias{levinvgamma} \alias{mgfinvgamma} \title{The Inverse Gamma Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments, and limited moments for the Inverse Gamma distribution with parameters \code{shape} and \code{scale}. } \usage{ dinvgamma(x, shape, rate = 1, scale = 1/rate, log = FALSE) pinvgamma(q, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qinvgamma(p, shape, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rinvgamma(n, shape, rate = 1, scale = 1/rate) minvgamma(order, shape, rate = 1, scale = 1/rate) levinvgamma(limit, shape, rate = 1, scale = 1/rate, order = 1) mgfinvgamma(t, shape, rate =1, scale = 1/rate, log =FALSE) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. 
Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{t}{numeric vector.} } \details{ The inverse gamma distribution with parameters \code{shape} \eqn{= \alpha}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{u^\alpha e^{-u}}{x \Gamma(\alpha)}, % \quad u = \theta/x}{% f(x) = u^a exp(-u)/(x Gamma(a)), u = s/x} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) The special case \code{shape == 1} is an \link[=dinvexp]{Inverse Exponential} distribution. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{k < \alpha}{k < shape}, and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, all \eqn{k}. The moment generating function is given by \eqn{E[e^{tX}]}. } \value{ \code{dinvgamma} gives the density, \code{pinvgamma} gives the distribution function, \code{qinvgamma} gives the quantile function, \code{rinvgamma} generates random deviates, \code{minvgamma} gives the \eqn{k}th raw moment, \code{levinvgamma} gives the \eqn{k}th moment of the limited loss variable, and \code{mgfinvgamma} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levinvgamma} computes the limited expected value using \code{gammainc} from package \pkg{expint}. Also known as the Vinci distribution. See also Kleiber and Kotz (2003) for alternative names and parametrizations. 
The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvgamma(2, 3, 4, log = TRUE)) p <- (1:10)/10 pinvgamma(qinvgamma(p, 2, 3), 2, 3) minvgamma(-1, 2, 2) ^ 2 levinvgamma(10, 2, 2, order = 1) mgfinvgamma(-1, 3, 2) } \keyword{distribution} actuar/man/coverage.Rd0000644000176200001440000000747715147745722014423 0ustar liggesusers\name{coverage} \alias{coverage} \alias{Coverage} \title{Density and Cumulative Distribution Function for Modified Data} \description{ Compute probability density function or cumulative distribution function of the payment per payment or payment per loss random variable under any combination of the following coverage modifications: deductible, limit, coinsurance, inflation. 
} \usage{ coverage(pdf, cdf, deductible = 0, franchise = FALSE, limit = Inf, coinsurance = 1, inflation = 0, per.loss = FALSE) } \arguments{ \item{pdf, cdf}{function object or character string naming a function to compute, respectively, the probability density function and cumulative distribution function of a probability law.} \item{deductible}{a unique positive numeric value.} \item{franchise}{logical; \code{TRUE} for a franchise deductible, \code{FALSE} (default) for an ordinary deductible.} \item{limit}{a unique positive numeric value larger than \code{deductible}.} \item{coinsurance}{a unique value between 0 and 1; the proportion of coinsurance.} \item{inflation}{a unique value between 0 and 1; the rate of inflation.} \item{per.loss}{logical; \code{TRUE} for the per loss distribution, \code{FALSE} (default) for the per payment distribution.} } \details{ \code{coverage} returns a function to compute the probability density function (pdf) or the cumulative distribution function (cdf) of the distribution of losses under coverage modifications. The pdf and cdf of unmodified losses are \code{pdf} and \code{cdf}, respectively. If \code{pdf} is specified, the pdf is returned; if \code{pdf} is missing or \code{NULL}, the cdf is returned. Note that \code{cdf} is needed if there is a deductible or a limit. } \value{ An object of mode \code{"function"} with the same arguments as \code{pdf} or \code{cdf}, except \code{"lower.tail"}, \code{"log.p"} and \code{"log"}, which are not supported. } \note{ Setting arguments of the function returned by \code{coverage} using \code{\link{formals}} may very well not work as expected. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \seealso{ \code{vignette("coverage")} for the exact definitions of the per payment and per loss random variables under an ordinary or franchise deductible. } \examples{ ## Default case: pdf of the per payment random variable with ## an ordinary deductible coverage(dgamma, pgamma, deductible = 1) ## Add a limit f <- coverage(dgamma, pgamma, deductible = 1, limit = 7) f <- coverage("dgamma", "pgamma", deductible = 1, limit = 7) # same f(0, shape = 3, rate = 1) f(2, shape = 3, rate = 1) f(6, shape = 3, rate = 1) f(8, shape = 3, rate = 1) curve(dgamma(x, 3, 1), xlim = c(0, 10), ylim = c(0, 0.3)) # original curve(f(x, 3, 1), xlim = c(0.01, 5.99), col = 4, add = TRUE) # modified points(6, f(6, 3, 1), pch = 21, bg = 4) ## Cumulative distribution function F <- coverage(cdf = pgamma, deductible = 1, limit = 7) F(0, shape = 3, rate = 1) F(2, shape = 3, rate = 1) F(6, shape = 3, rate = 1) F(8, shape = 3, rate = 1) curve(pgamma(x, 3, 1), xlim = c(0, 10), ylim = c(0, 1)) # original curve(F(x, 3, 1), xlim = c(0, 5.99), col = 4, add = TRUE) # modified curve(F(x, 3, 1), xlim = c(6, 10), col = 4, add = TRUE) # modified ## With no deductible, all distributions below are identical coverage(dweibull, pweibull, limit = 5) coverage(dweibull, pweibull, per.loss = TRUE, limit = 5) coverage(dweibull, pweibull, franchise = TRUE, limit = 5) coverage(dweibull, pweibull, per.loss = TRUE, franchise = TRUE, limit = 5) ## Coinsurance alone; only case that does not require the cdf coverage(dgamma, coinsurance = 0.8) } \keyword{models} actuar/man/UniformSupp.Rd0000644000176200001440000000305315147745722015101 0ustar liggesusers\name{UniformSupp} \alias{UniformSupp} \alias{munif} \alias{levunif} \alias{mgfunif} \title{Moments and Moment Generating Function of the Uniform Distribution} \description{ Raw moments, limited moments and moment generating function for the Uniform distribution from \code{min} to \code{max}. 
} \usage{ munif(order, min = 0, max = 1) levunif(limit, min = 0, max =1, order = 1) mgfunif(t, min = 0, max = 1, log = FALSE) } \arguments{ \item{order}{order of the moment.} \item{min, max}{lower and upper limits of the distribution. Must be finite.} \item{limit}{limit of the random variable.} \item{t}{numeric vector.} \item{log}{logical; if \code{TRUE}, the cumulant generating function is returned.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} and the moment generating function is \eqn{E[e^{tX}]}. } \value{ \code{munif} gives the \eqn{k}th raw moment, \code{levunif} gives the \eqn{k}th moment of the limited random variable, and \code{mgfunif} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link{Uniform}}. } \references{ \url{https://en.wikipedia.org/wiki/Uniform_distribution_\%28continuous\%29} } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, Christophe Dutang } \examples{ munif(-1) munif(1:5) levunif(3, order=1:5) levunif(3, 2, 4) mgfunif(1, 1, 2) } \keyword{distribution} actuar/man/TransformedBeta.Rd0000644000176200001440000001313715147745722015676 0ustar liggesusers\name{TransformedBeta} \alias{TransformedBeta} \alias{dtrbeta} \alias{ptrbeta} \alias{qtrbeta} \alias{rtrbeta} \alias{mtrbeta} \alias{levtrbeta} \alias{Pearson6} \alias{dpearson6} \alias{ppearson6} \alias{qpearson6} \alias{rpearson6} \alias{mpearson6} \alias{levpearson6} \title{The Transformed Beta Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Transformed Beta distribution with parameters \code{shape1}, \code{shape2}, \code{shape3} and \code{scale}. 
} \usage{ dtrbeta(x, shape1, shape2, shape3, rate = 1, scale = 1/rate, log = FALSE) ptrbeta(q, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qtrbeta(p, shape1, shape2, shape3, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rtrbeta(n, shape1, shape2, shape3, rate = 1, scale = 1/rate) mtrbeta(order, shape1, shape2, shape3, rate = 1, scale = 1/rate) levtrbeta(limit, shape1, shape2, shape3, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, shape3, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The transformed beta distribution with parameters \code{shape1} \eqn{= \alpha}{= a}, \code{shape2} \eqn{= \gamma}{= b}, \code{shape3} \eqn{= \tau}{= c} and \code{scale} \eqn{= \theta}{= s}, has density: \deqn{f(x) = \frac{\Gamma(\alpha + \tau)}{\Gamma(\alpha)\Gamma(\tau)} \frac{\gamma (x/\theta)^{\gamma \tau}}{% x [1 + (x/\theta)^\gamma]^{\alpha + \tau}}}{% f(x) = Gamma(a + c)/(Gamma(a) * Gamma(c)) (b (x/s)^(bc))/% (x [1 + (x/s)^b]^(a + c))} for \eqn{x > 0}, \eqn{\alpha > 0}{a > 0}, \eqn{\gamma > 0}{b > 0}, \eqn{\tau > 0}{c > 0} and \eqn{\theta > 0}{s > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) 
The transformed beta is the distribution of the random variable \deqn{\theta \left(\frac{X}{1 - X}\right)^{1/\gamma},}{% s (X/(1 - X))^(1/b),} where \eqn{X} has a beta distribution with parameters \eqn{\tau}{c} and \eqn{\alpha}{a}. The transformed beta distribution defines a family of distributions with the following special cases: \itemize{ \item A \link[=dburr]{Burr} distribution when \code{shape3 == 1}; \item A \link[=dllogis]{loglogistic} distribution when \code{shape1 == shape3 == 1}; \item A \link[=dparalogis]{paralogistic} distribution when \code{shape3 == 1} and \code{shape2 == shape1}; \item A \link[=dgenpareto]{generalized Pareto} distribution when \code{shape2 == 1}; \item A \link[=dpareto]{Pareto} distribution when \code{shape2 == shape3 == 1}; \item An \link[=dinvburr]{inverse Burr} distribution when \code{shape1 == 1}; \item An \link[=dinvpareto]{inverse Pareto} distribution when \code{shape2 == shape1 == 1}; \item An \link[=dinvparalogis]{inverse paralogistic} distribution when \code{shape1 == 1} and \code{shape3 == shape2}. } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}, \eqn{-\tau\gamma < k < \alpha\gamma}{-shape3 * shape2 < k < shape1 * shape2}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\tau\gamma}{k > -shape3 * shape2} and \eqn{\alpha - k/\gamma}{shape1 - k/shape2} not a negative integer. } \value{ \code{dtrbeta} gives the density, \code{ptrbeta} gives the distribution function, \code{qtrbeta} gives the quantile function, \code{rtrbeta} generates random deviates, \code{mtrbeta} gives the \eqn{k}th raw moment, and \code{levtrbeta} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levtrbeta} computes the limited expected value using \code{\link{betaint}}. Distribution also known as the Generalized Beta of the Second Kind and Pearson Type VI. 
See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dfpareto}} for an equivalent distribution with a location parameter. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dtrbeta(2, 2, 3, 4, 5, log = TRUE)) p <- (1:10)/10 ptrbeta(qtrbeta(p, 2, 3, 4, 5), 2, 3, 4, 5) qpearson6(0.3, 2, 3, 4, 5, lower.tail = FALSE) ## variance mtrbeta(2, 2, 3, 4, 5) - mtrbeta(1, 2, 3, 4, 5)^2 ## case with shape1 - order/shape2 > 0 levtrbeta(10, 2, 3, 4, scale = 1, order = 2) ## case with shape1 - order/shape2 < 0 levtrbeta(10, 1/3, 0.75, 4, scale = 0.5, order = 2) } \keyword{distribution} actuar/man/actuar-package.Rd0000644000176200001440000000764715147745722015477 0ustar liggesusers\name{actuar-package} \alias{actuar-package} \alias{actuar} \docType{package} \title{\packageTitle{actuar}} \description{\packageDescription{actuar}} \details{ \pkg{actuar} provides additional actuarial science functionality and support for heavy tailed distributions to the \R statistical system. The current feature set of the package can be split into five main categories. 
\enumerate{ \item{Additional probability distributions: 23 continuous heavy tailed distributions from the Feller-Pareto and Transformed Gamma families, the loggamma, the Gumbel, the inverse Gaussian and the generalized beta; phase-type distributions; the Poisson-inverse Gaussian discrete distribution; zero-truncated and zero-modified extensions of the standard discrete distributions; computation of raw moments, limited moments and the moment generating function (when it exists) of continuous distributions. See the \dQuote{distributions} package vignette for details.} \item{Loss distributions modeling: extensive support of grouped data; functions to compute empirical raw and limited moments; support for minimum distance estimation using three different measures; treatment of coverage modifications (deductibles, limits, inflation, coinsurance). See the \dQuote{modeling} and \dQuote{coverage} package vignettes for details.} \item{Risk and ruin theory: discretization of the claim amount distribution; calculation of the aggregate claim amount distribution; calculation of the adjustment coefficient; calculation of the probability of ruin, including using phase-type distributions. See the \dQuote{risk} package vignette for details.} \item{Simulation of discrete mixtures, compound models (including the compound Poisson), and compound hierarchical models. See the \dQuote{simulation} package vignette for details.} \item{Credibility theory: function \code{cm} fits hierarchical (including Bühlmann, Bühlmann-Straub), regression and linear Bayes credibility models. See the \dQuote{credibility} package vignette for details.} } } \author{ Christophe Dutang, Vincent Goulet, Mathieu Pigeon and many other contributors; use \code{packageDescription("actuar")} for the complete list. Maintainer: Vincent Goulet. } \references{ Dutang, C., Goulet, V. and Pigeon, M. (2008). actuar: An R Package for Actuarial Science. \emph{Journal of Statistical Software}, \bold{25}(7), 1--37. 
\doi{10.18637/jss.v025.i07}. Dutang, C., Goulet, V., Langevin, N. (2022). Feller-Pareto and Related Distributions: Numerical Implementation and Actuarial Applications. \emph{Journal of Statistical Software}, \bold{103}(6), 1--22. \doi{10.18637/jss.v103.i06}. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ For probability distributions support functions, use as starting points: \code{\link{FellerPareto}}, \code{\link{TransformedGamma}}, \code{\link{Loggamma}}, \code{\link{Gumbel}}, \code{\link{InverseGaussian}}, \code{\link{PhaseType}}, \code{\link{PoissonInverseGaussian}} and, e.g., \code{\link{ZeroTruncatedPoisson}}, \code{\link{ZeroModifiedPoisson}}. For loss modeling support functions: \code{\link{grouped.data}}, \code{\link{ogive}}, \code{\link{emm}}, \code{\link{elev}}, \code{\link{mde}}, \code{\link{coverage}}. For risk and ruin theory functions: \code{\link{discretize}}, \code{\link{aggregateDist}}, \code{\link{adjCoef}}, \code{\link{ruin}}. For credibility theory functions and datasets: \code{\link{cm}}, \code{\link{hachemeister}}. } \examples{ ## The package comes with extensive demonstration scripts; ## use the following command to obtain the list. \dontrun{demo(package = "actuar")} } \keyword{package} \keyword{distribution} \keyword{models} \keyword{univar} actuar/man/var-methods.Rd0000644000176200001440000000611415147745722015044 0ustar liggesusers\name{var} \alias{var} \alias{var.default} \alias{var.grouped.data} \alias{sd} \alias{sd.default} \alias{sd.grouped.data} \title{Variance and Standard Deviation} \description{ Generic functions for the variance and standard deviation, and methods for individual and grouped data. The default methods for individual data are the functions from the \pkg{stats} package. 
} \usage{ var(x, \dots) \method{var}{default}(x, y = NULL, na.rm = FALSE, use, \dots) \method{var}{grouped.data}(x, \dots) sd(x, \dots) \method{sd}{default}(x, na.rm = FALSE, \dots) \method{sd}{grouped.data}(x, \dots) } \arguments{ \item{x}{a vector or matrix of individual data, or an object of class \code{"grouped.data"}.} \item{y}{see \code{\link[stats:var]{stats::var}}.} \item{na.rm}{see \code{\link[stats:var]{stats::var}}.} \item{use}{see \code{\link[stats:var]{stats::var}}.} \item{\dots}{further arguments passed to or from other methods.} } \details{ This page documents variance and standard deviation computations for grouped data. For individual data, see \code{\link[stats]{var}} and \code{\link[stats]{sd}} from the \pkg{stats} package. For grouped data with group boundaries \eqn{c_0, c_1, \dots, c_r}{c[0], c[1], \dots, c[r]} and group frequencies \eqn{n_1, \dots, n_r}{n[1], \dots, n[r]}, \code{var} computes the sample variance \deqn{\frac{1}{n - 1} \sum_{j = 1}^r n_j (a_j - m_1)^2,}{% (1/(n - 1)) * sum(j; n[j] * (a[j] - m)^2),} where \eqn{a_j = (c_{j - 1} + c_j)/2}{a[j] = (c[j - 1] + c[j])/2} is the midpoint of the \eqn{j}th interval, \eqn{m_1}{m} is the sample mean (or sample first moment) of the data, and \eqn{n = \sum_{j = 1}^r n_j}{n = sum(j; n[j])}. The sample standard deviation is the square root of the sample variance. The sample variance for grouped data differs from the variance computed from the empirical raw moments with \code{\link{emm}} in two aspects. First, it takes into account the degrees of freedom. Second, it applies Sheppard's correction factor to compensate for the overestimation of the true variation in the data. For groups of equal width \eqn{k}, Sheppard's correction factor is equal to \eqn{-k^2/12}. } \value{ A named vector of variances or standard deviations. } \seealso{ \code{\link{grouped.data}} to create grouped data objects; \code{\link{mean.grouped.data}} for the mean and \code{\link{emm}} for higher moments. 
} \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998), \emph{Loss Models, From Data to Decisions}, Wiley. Heumann, C., Schomaker, M., Shalabh (2016), \emph{Introduction to Statistics and Data Analysis}, Springer. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}. Variance and standard deviation methods for grouped data contributed by Walter Garcia-Fontes \email{walter.garcia@upf.edu}. } \examples{ data(gdental) var(gdental) sd(gdental) ## Illustration of Sheppard's correction factor cj <- c(0, 2, 4, 6, 8) nj <- c(1, 5, 3, 2) gd <- grouped.data(Group = cj, Frequency = nj) (sum(nj) - 1)/sum(nj) * var(gd) (emm(gd, 2) - emm(gd)^2) - 4/12 } \keyword{univar} actuar/man/hachemeister.Rd0000644000176200001440000000154615147745722015260 0ustar liggesusers\name{hachemeister} \docType{data} \alias{hachemeister} \title{Hachemeister Data Set} \description{ Hachemeister (1975) data set giving average claim amounts in private passenger bodily injury insurance in five U.S. states over 12 quarters between July 1970 and June 1973 and the corresponding number of claims. } \usage{hachemeister} \format{ A matrix with 5 rows and the following 25 columns: \describe{ \item{\code{state}}{the state number;} \item{\code{ratio.1}, \dots, \code{ratio.12}}{the average claim amounts;} \item{\code{weight.1}, \dots, \code{weight.12}}{the corresponding number of claims.} } } \source{ Hachemeister, C. A. (1975), \emph{Credibility for regression models with application to trend}, Proceedings of the Berkeley Actuarial Research Conference on Credibility, Academic Press. 
} \keyword{datasets} actuar/man/aggregateDist.Rd0000644000176200001440000003061415147745722015367 0ustar liggesusers\name{aggregateDist} \alias{aggregateDist} \alias{print.aggregateDist} \alias{plot.aggregateDist} \alias{summary.aggregateDist} \alias{mean.aggregateDist} \alias{diff.aggregateDist} \title{Aggregate Claim Amount Distribution} \description{ Compute the aggregate claim amount cumulative distribution function of a portfolio over a period using one of five methods. } \usage{ aggregateDist(method = c("recursive", "convolution", "normal", "npower", "simulation"), model.freq = NULL, model.sev = NULL, p0 = NULL, x.scale = 1, convolve = 0, moments, nb.simul, \dots, tol = 1e-06, maxit = 500, echo = FALSE) \method{print}{aggregateDist}(x, \dots) \method{plot}{aggregateDist}(x, xlim, ylab = expression(F[S](x)), main = "Aggregate Claim Amount Distribution", sub = comment(x), \dots) \method{summary}{aggregateDist}(object, \dots) \method{mean}{aggregateDist}(x, \dots) \method{diff}{aggregateDist}(x, \dots) } \arguments{ \item{method}{method to be used} \item{model.freq}{for \code{"recursive"} method: a character string giving the name of a distribution in the \eqn{(a, b, 0)} or \eqn{(a, b, 1)} families of distributions. For \code{"convolution"} method: a vector of claim number probabilities. For \code{"simulation"} method: a frequency simulation model (see \code{\link{rcomphierarc}} for details) or \code{NULL}. Ignored with \code{normal} and \code{npower} methods.} \item{model.sev}{for \code{"recursive"} and \code{"convolution"} methods: a vector of claim amount probabilities. For \code{"simulation"} method: a severity simulation model (see \code{\link{rcomphierarc}} for details) or \code{NULL}. Ignored with \code{normal} and \code{npower} methods.} \item{p0}{arbitrary probability at zero for the frequency distribution. Creates a zero-modified or zero-truncated distribution if not \code{NULL}. 
Used only with \code{"recursive"} method.} \item{x.scale}{value of an amount of 1 in the severity model (monetary unit). Used only with \code{"recursive"} and \code{"convolution"} methods.} \item{convolve}{number of times to convolve the resulting distribution with itself. Used only with \code{"recursive"} method.} \item{moments}{vector of the true moments of the aggregate claim amount distribution; required only by the \code{"normal"} or \code{"npower"} methods.} \item{nb.simul}{number of simulations for the \code{"simulation"} method.} \item{\dots}{parameters of the frequency distribution for the \code{"recursive"} method; further arguments to be passed to or from other methods otherwise.} \item{tol}{the resulting cumulative distribution in the \code{"recursive"} method will get less than \code{tol} away from 1.} \item{maxit}{maximum number of recursions in the \code{"recursive"} method.} \item{echo}{logical; echo the recursions to screen in the \code{"recursive"} method.} \item{x, object}{an object of class \code{"aggregateDist"}.} \item{xlim}{numeric of length 2; the \eqn{x} limits of the plot.} \item{ylab}{label of the y axis.} \item{main}{main title.} \item{sub}{subtitle, defaulting to the calculation method.} } \details{ \code{aggregateDist} returns a function to compute the cumulative distribution function (cdf) of the aggregate claim amount distribution in any point. The \code{"recursive"} method computes the cdf using the Panjer algorithm; the \code{"convolution"} method using convolutions; the \code{"normal"} method using a normal approximation; the \code{"npower"} method using the Normal Power 2 approximation; the \code{"simulation"} method using simulations. More details follow. } \section{Recursive method}{ The frequency distribution must be a member of the \eqn{(a, b, 0)} or \eqn{(a, b, 1)} families of discrete distributions. 
To use a distribution from the \eqn{(a, b, 0)} family, \code{model.freq} must be one of \code{"binomial"}, \code{"geometric"}, \code{"negative binomial"} or \code{"poisson"}, and \code{p0} must be \code{NULL}. To use a zero-truncated distribution from the \eqn{(a, b, 1)} family, \code{model.freq} may be one of the strings above together with \code{p0 = 0}. As a shortcut, \code{model.freq} may also be one of \code{"zero-truncated binomial"}, \code{"zero-truncated geometric"}, \code{"zero-truncated negative binomial"}, \code{"zero-truncated poisson"} or \code{"logarithmic"}, and \code{p0} is then ignored (with a warning if non \code{NULL}). (Note: since the logarithmic distribution is always zero-truncated, \code{model.freq = "logarithmic"} may be used with either \code{p0 = NULL} or \code{p0 = 0}.) To use a zero-modified distribution from the \eqn{(a, b, 1)} family, \code{model.freq} may be one of the standard frequency distributions mentioned above with \code{p0} set to some probability that the distribution takes the value \eqn{0}. It is equivalent, but more explicit, to set \code{model.freq} to one of \code{"zero-modified binomial"}, \code{"zero-modified geometric"}, \code{"zero-modified negative binomial"}, \code{"zero-modified poisson"} or \code{"zero-modified logarithmic"}. The parameters of the frequency distribution must be specified using names identical to the arguments of the appropriate function \code{\link{dbinom}}, \code{\link{dgeom}}, \code{\link{dnbinom}}, \code{\link{dpois}} or \code{\link{dlogarithmic}}. In the latter case, do take note that the parametrization of \code{dlogarithmic} is different from Appendix B of Klugman et al. (2012). If the length of \code{p0} is greater than one, only the first element is used, with a warning. \code{model.sev} is a vector of the (discretized) claim amount distribution \eqn{X}; the first element \strong{must} be \eqn{f_X(0) = \Pr[X = 0]}{fx(0) = Pr[X = 0]}. 
The recursion will fail to start if the expected number of claims is too large. One may divide the appropriate parameter of the frequency distribution by \eqn{2^n} and convolve the resulting distribution \eqn{n =} \code{convolve} times. Failure to obtain a cumulative distribution function less than \code{tol} away from 1 within \code{maxit} iterations is often due to too coarse a discretization of the severity distribution. } \section{Convolution method}{ The cumulative distribution function (cdf) \eqn{F_S(x)}{Fs(x)} of the aggregate claim amount of a portfolio in the collective risk model is \deqn{F_S(x) = \sum_{n = 0}^{\infty} F_X^{*n}(x) p_n,}{% Fs(x) = sum(n; Fx^\{*n\}(x) * pn)} for \eqn{x = 0, 1, \dots}; \eqn{p_n = \Pr[N = n]}{pn = Pr[N = n]} is the frequency probability mass function and \eqn{F_X^{*n}(x)}{Fx^\{*n\}(x)} is the cdf of the \eqn{n}th convolution of the (discrete) claim amount random variable. \code{model.freq} is vector \eqn{p_n}{pn} of the number of claims probabilities; the first element \strong{must} be \eqn{\Pr[N = 0]}{Pr[N = 0]}. \code{model.sev} is vector \eqn{f_X(x)}{fx(x)} of the (discretized) claim amount distribution; the first element \strong{must} be \eqn{f_X(0)}{fx(0)}. } \section{Normal and Normal Power 2 methods}{ The Normal approximation of a cumulative distribution function (cdf) \eqn{F(x)} with mean \eqn{\mu}{m} and standard deviation \eqn{\sigma}{s} is \deqn{F(x) \approx \Phi\left( \frac{x - \mu}{\sigma} \right).}{% F(x) ~= pnorm((x - m)/s).} The Normal Power 2 approximation of a cumulative distribution function (cdf) \eqn{F(x)} with mean \eqn{\mu}{m}, standard deviation \eqn{\sigma}{s} and skewness \eqn{\gamma}{g} is \deqn{F(x) \approx \Phi \left(% -\frac{3}{\gamma} + \sqrt{\frac{9}{\gamma^2} + 1 % + \frac{6}{\gamma} \frac{x - \mu}{\sigma}} \right).}{% F(x) ~= pnorm(-3/g + sqrt(9/g^2 + 1 + (6/g) * (x - m)/s)).} This formula is valid only for the right-hand tail of the distribution and skewness should not exceed unity. 
} \section{Simulation method}{ This methods returns the empirical distribution function of a sample of size \code{nb.simul} of the aggregate claim amount distribution specified by \code{model.freq} and \code{model.sev}. \code{\link{rcomphierarc}} is used for the simulation of claim amounts, hence both the frequency and severity models can be mixtures of distributions. } \value{ A function of class \code{"aggregateDist"}, inheriting from the \code{"function"} class when using normal and Normal Power approximations and additionally inheriting from the \code{"ecdf"} and \code{"stepfun"} classes when other methods are used. There are methods available to summarize (\code{summary}), represent (\code{print}), plot (\code{plot}), compute quantiles (\code{quantile}) and compute the mean (\code{mean}) of \code{"aggregateDist"} objects. For the \code{diff} method: a numeric vector of probabilities corresponding to the probability mass function evaluated at the knots of the distribution. } \seealso{ \code{\link{discretize}} to discretize a severity distribution; \code{\link{mean.aggregateDist}} to compute the mean of the distribution; \code{\link{quantile.aggregateDist}} to compute the quantiles or the Value-at-Risk; \code{\link{CTE.aggregateDist}} to compute the Conditional Tail Expectation (or Tail Value-at-Risk); \code{\link{rcomphierarc}}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. Daykin, C.D., \enc{Pentikäinen}{Pentikainen}, T. and Pesonen, M. (1994), \emph{Practical Risk Theory for Actuaries}, Chapman & Hall. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Louis-Philippe Pouliot } \examples{ ## Convolution method (example 9.5 of Klugman et al. 
(2012)) fx <- c(0, 0.15, 0.2, 0.25, 0.125, 0.075, 0.05, 0.05, 0.05, 0.025, 0.025) pn <- c(0.05, 0.1, 0.15, 0.2, 0.25, 0.15, 0.06, 0.03, 0.01) Fs <- aggregateDist("convolution", model.freq = pn, model.sev = fx, x.scale = 25) summary(Fs) c(Fs(0), diff(Fs(25 * 0:21))) # probability mass function plot(Fs) ## Recursive method (example 9.10 of Klugman et al. (2012)) fx <- c(0, crossprod(c(2, 1)/3, matrix(c(0.6, 0.7, 0.4, 0, 0, 0.3), 2, 3))) Fs <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 3) plot(Fs) Fs(knots(Fs)) # cdf evaluated at its knots diff(Fs) # probability mass function ## Recursive method (high frequency) fx <- c(0, 0.15, 0.2, 0.25, 0.125, 0.075, 0.05, 0.05, 0.05, 0.025, 0.025) \dontrun{Fs <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 1000)} Fs <- aggregateDist("recursive", model.freq = "poisson", model.sev = fx, lambda = 250, convolve = 2, maxit = 1500) plot(Fs) ## Recursive method (zero-modified distribution; example 9.11 of ## Klugman et al. (2012)) Fn <- aggregateDist("recursive", model.freq = "binomial", model.sev = c(0.3, 0.5, 0.2), x.scale = 50, p0 = 0.4, size = 3, prob = 0.3) diff(Fn) ## Equivalent but more explicit call aggregateDist("recursive", model.freq = "zero-modified binomial", model.sev = c(0.3, 0.5, 0.2), x.scale = 50, p0 = 0.4, size = 3, prob = 0.3) ## Recursive method (zero-truncated distribution). Using 'fx' above ## would mean that both Pr[N = 0] = 0 and Pr[X = 0] = 0, therefore ## Pr[S = 0] = 0 and recursions would not start. 
fx <- discretize(pexp(x, 1), from = 0, to = 100, method = "upper") fx[1L] # non zero aggregateDist("recursive", model.freq = "zero-truncated poisson", model.sev = fx, lambda = 3, x.scale = 25, echo=TRUE) ## Normal Power approximation Fs <- aggregateDist("npower", moments = c(200, 200, 0.5)) Fs(210) ## Simulation method model.freq <- expression(data = rpois(3)) model.sev <- expression(data = rgamma(100, 2)) Fs <- aggregateDist("simulation", nb.simul = 1000, model.freq, model.sev) mean(Fs) plot(Fs) ## Evaluation of ruin probabilities using Beekman's formula with ## Exponential(1) claim severity, Poisson(1) frequency and premium rate ## c = 1.2. fx <- discretize(pexp(x, 1), from = 0, to = 100, method = "lower") phi0 <- 0.2/1.2 Fs <- aggregateDist(method = "recursive", model.freq = "geometric", model.sev = fx, prob = phi0) 1 - Fs(400) # approximate ruin probability u <- 0:100 plot(u, 1 - Fs(u), type = "l", main = "Ruin probability") } \keyword{distribution} \keyword{models} actuar/man/ZeroModifiedLogarithmic.Rd0000644000176200001440000000752215147745722017362 0ustar liggesusers\name{ZeroModifiedLogarithmic} \alias{ZeroModifiedLogarithmic} \alias{ZMLogarithmic} \alias{dzmlogarithmic} \alias{pzmlogarithmic} \alias{qzmlogarithmic} \alias{rzmlogarithmic} \title{The Zero-Modified Logarithmic Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Modified Logarithmic (or log-series) distribution with parameter \code{prob} and arbitrary probability at zero \code{p0}. } \usage{ dzmlogarithmic(x, prob, p0, log = FALSE) pzmlogarithmic(q, prob, p0, lower.tail = TRUE, log.p = FALSE) qzmlogarithmic(p, prob, p0, lower.tail = TRUE, log.p = FALSE) rzmlogarithmic(n, prob, p0) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. 
If \code{length(n) > 1}, the length is taken to be the number required.} \item{prob}{parameter. \code{0 <= prob < 1}.} \item{p0}{probability mass at zero. \code{0 <= p0 <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-modified logarithmic distribution with \code{prob} \eqn{= p} and \code{p0} \eqn{= p_0}{= p0} is a discrete mixture between a degenerate distribution at zero and a (standard) logarithmic. The probability mass function is \eqn{p(0) = p_0}{p(0) = p0} and \deqn{% p(x) = (1-p_0) f(x)}{p(x) = (1-p0) f(x)} for \eqn{x = 1, 2, \ldots}, \eqn{0 < p < 1} and \eqn{0 \le p_0 \le 1}{0 \le p0 \le 1}, where \eqn{f(x)} is the probability mass function of the logarithmic. The cumulative distribution function is \deqn{P(x) = p_0 + (1 - p_0) F(x)}{P(x) = p0 + (1 - p0) F(x).} The special case \code{p0 == 0} is the standard logarithmic. The zero-modified logarithmic distribution is the limiting case of the zero-modified negative binomial distribution with \code{size} parameter equal to \eqn{0}. Note that in this context, parameter \code{prob} generally corresponds to the probability of \emph{failure} of the zero-truncated negative binomial. If an element of \code{x} is not integer, the result of \code{dzmlogarithmic} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{F(x) \ge p}, where \eqn{F} is the distribution function. } \value{ \code{dzmlogarithmic} gives the probability mass function, \code{pzmlogarithmic} gives the distribution function, \code{qzmlogarithmic} gives the quantile function, and \code{rzmlogarithmic} generates random deviates. Invalid \code{prob} or \code{p0} will result in return value \code{NaN}, with a warning. 
The length of the result is determined by \code{n} for \code{rzmlogarithmic}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}zmlogarithmic} use \code{\{d,p,q\}logarithmic} for all but the trivial input values and \eqn{p(0)}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dlogarithmic}} for the logarithmic distribution. \code{\link{dzmnbinom}} for the zero-modified negative binomial distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ p <- 1/(1 + 0.5) dzmlogarithmic(1:5, prob = p, p0 = 0.6) (1-0.6) * dlogarithmic(1:5, p)/plogarithmic(0, p, lower = FALSE) # same ## simple relation between survival functions pzmlogarithmic(0:5, p, p0 = 0.2, lower = FALSE) (1-0.2) * plogarithmic(0:5, p, lower = FALSE)/plogarithmic(0, p, lower = FALSE) # same qzmlogarithmic(pzmlogarithmic(0:10, 0.3, p0 = 0.6), 0.3, p0 = 0.6) } \keyword{distribution} actuar/man/InversePareto.Rd0000644000176200001440000000555515147745722015405 0ustar liggesusers\name{InversePareto} \alias{InversePareto} \alias{dinvpareto} \alias{pinvpareto} \alias{qinvpareto} \alias{rinvpareto} \alias{minvpareto} \alias{levinvpareto} \title{The Inverse Pareto Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Inverse Pareto distribution with parameters \code{shape} and \code{scale}. } \usage{ dinvpareto(x, shape, scale, log = FALSE) pinvpareto(q, shape, scale, lower.tail = TRUE, log.p = FALSE) qinvpareto(p, shape, scale, lower.tail = TRUE, log.p = FALSE) rinvpareto(n, shape, scale) minvpareto(order, shape, scale) levinvpareto(limit, shape, scale, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. 
If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape, scale}{parameters. Must be strictly positive.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The inverse Pareto distribution with parameters \code{shape} \eqn{= \tau}{= a} and \code{scale} \eqn{= \theta}{= s} has density: \deqn{f(x) = \frac{\tau \theta x^{\tau - 1}}{% (x + \theta)^{\tau + 1}}}{% f(x) = a s x^(a - 1)/(x + s)^(a + 1)} for \eqn{x > 0}, \eqn{\tau > 0}{a > 0} and \eqn{\theta > 0}{s > 0}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}, \eqn{-\tau < k < 1}{-shape < k < 1}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\tau}{k > -shape}. } \value{ \code{dinvpareto} gives the density, \code{pinvpareto} gives the distribution function, \code{qinvpareto} gives the quantile function, \code{rinvpareto} generates random deviates, \code{minvpareto} gives the \eqn{k}th raw moment, and \code{levinvpareto} calculates the \eqn{k}th limited moment. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ Evaluation of \code{levinvpareto} is done using numerical integration. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvpareto(2, 3, 4, log = TRUE)) p <- (1:10)/10 pinvpareto(qinvpareto(p, 2, 3), 2, 3) minvpareto(0.5, 1, 2) } \keyword{distribution} actuar/man/unroll.Rd0000644000176200001440000000242515147745722014127 0ustar liggesusers\name{unroll} \alias{unroll} \title{Display a Two-Dimension Version of a Matrix of Vectors} \description{ Displays all values of a matrix of vectors by \dQuote{unrolling} the object vertically or horizontally. } \usage{ unroll(x, bycol = FALSE, drop = TRUE) } \arguments{ \item{x}{a list of vectors with a \code{\link[base]{dim}} attribute of length 0, 1 or 2.} \item{bycol}{logical; whether to unroll horizontally (\code{FALSE}) or vertically (\code{TRUE}).} \item{drop}{logical; if \code{TRUE}, the result is coerced to the lowest possible dimension.} } \details{ \code{unroll} returns a matrix where elements of \code{x} are concatenated (\dQuote{unrolled}) by row (\code{bycol = FALSE}) or by column (\code{bycol = TRUE}). \code{NA} is used to make rows/columns of equal length. Vectors and one dimensional arrays are coerced to \strong{row} matrices. } \value{ A vector or matrix. } \seealso{ This function was originally written for use in \code{\link{severity.portfolio}}. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Louis-Philippe Pouliot } \examples{ x <- list(c(1:3), c(1:8), c(1:4), c(1:3)) (mat <- matrix(x, 2, 2)) unroll(mat) unroll(mat, bycol = TRUE) unroll(mat[1, ]) unroll(mat[1, ], drop = FALSE) } \keyword{manip} actuar/man/ruin.Rd0000644000176200001440000001336515147745722013572 0ustar liggesusers\name{ruin} \alias{ruin} \alias{plot.ruin} \title{Probability of Ruin} \description{ Calculation of infinite time probability of ruin in the models of \enc{Cramér}{Cramer}-Lundberg and Sparre Andersen, that is with exponential or phase-type (including mixtures of exponentials, Erlang and mixture of Erlang) claims interarrival time. 
} \usage{ ruin(claims = c("exponential", "Erlang", "phase-type"), par.claims, wait = c("exponential", "Erlang", "phase-type"), par.wait, premium.rate = 1, tol = sqrt(.Machine$double.eps), maxit = 200L, echo = FALSE) \method{plot}{ruin}(x, from = NULL, to = NULL, add = FALSE, xlab = "u", ylab = expression(psi(u)), main = "Probability of Ruin", xlim = NULL, \dots) } \arguments{ \item{claims}{character; the type of claim severity distribution.} \item{wait}{character; the type of claim interarrival (wait) time distribution.} \item{par.claims, par.wait}{named list containing the parameters of the distribution; see Details.} \item{premium.rate}{numeric vector of length 1; the premium rate.} \item{tol, maxit, echo}{respectively the tolerance level of the stopping criteria, the maximum number of iterations and whether or not to echo the procedure when the transition rates matrix is determined iteratively. Ignored if \code{wait = "exponential"}.} \item{x}{an object of class \code{"ruin"}.} \item{from, to}{the range over which the function will be plotted.} \item{add}{logical; if \code{TRUE} add to already existing plot.} \item{xlim}{numeric of length 2; if specified, it serves as default for \code{c(from, to)}.} \item{xlab, ylab}{label of the x and y axes, respectively.} \item{main}{main title.} \item{\dots}{further graphical parameters accepted by \code{\link[graphics]{curve}}.} } \details{ The names of the parameters in \code{par.claims} and \code{par.wait} must be the same as in \code{\link[stats]{dexp}}, \code{\link[stats]{dgamma}} or \code{\link{dphtype}}, as appropriate. A model will be a mixture of exponential or Erlang distributions (but not phase-type) when the parameters are vectors of length \eqn{> 1} and the parameter list contains a vector \code{weights} of the coefficients of the mixture. Parameters are recycled when needed. Their names can be abbreviated. Combinations of exponentials as defined in Dufresne and Gerber (1988) are \emph{not} supported. 
Ruin probabilities are evaluated using \code{\link{pphtype}} except when both distributions are exponential, in which case an explicit formula is used. When \code{wait != "exponential"} (Sparre Andersen model), the transition rate matrix \eqn{\boldsymbol{Q}}{Q} of the distribution of the probability of ruin is determined iteratively using a fixed point-like algorithm. The stopping criteria used is% \deqn{\max \left\{ \sum_{j = 1}^n |\boldsymbol{Q}_{ij} - \boldsymbol{Q}_{ij}^\prime| \right\} < \code{tol},}{% max(rowSum(|Q - Q'|)) < tol,}% where \eqn{\boldsymbol{Q}}{Q} and \eqn{\boldsymbol{Q}^\prime}{Q'} are two successive values of the matrix. } \value{ A function of class \code{"ruin"} inheriting from the \code{"function"} class to compute the probability of ruin given initial surplus levels. The function has arguments: \item{u}{numeric vector of initial surplus levels;} \item{survival}{logical; if \code{FALSE} (default), probabilities are \eqn{\psi(u)}{psi(u)}, otherwise, \eqn{\phi(u) = 1 - \psi(u)}{phi(u) = 1 - psi(u)};} \item{lower.tail}{an alias for \code{!survival}.} } \references{ Asmussen, S. and Rolski, T. (1991), Computational methods in risk theory: A matrix algorithmic approach, \emph{Insurance: Mathematics and Economics} \bold{10}, 259--274. Dufresne, F. and Gerber, H. U. (1988), Three methods to calculate the probability of ruin, \emph{Astin Bulletin} \bold{19}, 71--90. Gerber, H. U. (1979), \emph{An Introduction to Mathematical Risk Theory}, Huebner Foundation. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca}, and Christophe Dutang } \examples{ ## Case with an explicit formula: exponential claims and exponential ## interarrival times. 
psi <- ruin(claims = "exponential", par.claims = list(rate = 5), wait = "exponential", par.wait = list(rate = 3)) psi psi(0:10) plot(psi, from = 0, to = 10) ## Mixture of two exponentials for claims, exponential interarrival ## times (Gerber 1979) psi <- ruin(claims = "exponential", par.claims = list(rate = c(3, 7), w = 0.5), wait = "exponential", par.wait = list(rate = 3), pre = 1) u <- 0:10 psi(u) (24 * exp(-u) + exp(-6 * u))/35 # same ## Mixtures of Erlang distributions for claims and interarrival times. psi <- ruin(claims = 'Erlang', par.claims = list(shape = c(1, 2, 3), scale = c(3, 2, 1), weights = c(3, 4, 1)/8), wait = 'Erlang', par.wait = list(shape = c(6, 4, 4), scale= c(3, 2, 2), weights = c(3, 4, 1)/8), premium.rate = 0.6) psi(0:10) ## Phase-type claims, exponential interarrival times (Asmussen and ## Rolski 1991) p <- c(0.5614, 0.4386) r <- matrix(c(-8.64, 0.101, 1.997, -1.095), 2, 2) lambda <- 1/(1.1 * mphtype(1, p, r)) psi <- ruin(claims = "p", par.claims = list(prob = p, rates = r), wait = "e", par.wait = list(rate = lambda)) psi plot(psi, xlim = c(0, 50)) ## Phase-type claims, mixture of two exponentials for interarrival times ## (Asmussen and Rolski 1991) a <- (0.4/5 + 0.6) * lambda ruin(claims = "p", par.claims = list(prob = p, rates = r), wait = "e", par.wait = list(rate = c(5 * a, a), weights = c(0.4, 0.6)), maxit = 225L) } \keyword{models} actuar/man/LognormalMoments.Rd0000644000176200001440000000247015147745722016111 0ustar liggesusers\name{LognormalMoments} \alias{LognormalMoments} \alias{mlnorm} \alias{levlnorm} \title{Raw and Limited Moments of the Lognormal Distribution} \description{ Raw moments and limited moments for the Lognormal distribution whose logarithm has mean equal to \code{meanlog} and standard deviation equal to \code{sdlog}. 
} \usage{ mlnorm(order, meanlog = 0, sdlog = 1) levlnorm(limit, meanlog = 0, sdlog = 1, order = 1) } \arguments{ \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{meanlog, sdlog}{mean and standard deviation of the distribution on the log scale with default values of \code{0} and \code{1} respectively.} } \value{ \code{mlnorm} gives the \eqn{k}th raw moment and \code{levlnorm} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link{Lognormal}} for details on the lognormal distribution and functions \code{[dpqr]lnorm}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ mlnorm(2, 3, 4) - mlnorm(1, 3, 4)^2 levlnorm(10, 3, 4, order = 2) } \keyword{distribution} actuar/man/Loggamma.Rd0000644000176200001440000000614315147745722014341 0ustar liggesusers\name{Loggamma} \alias{Loggamma} \alias{dlgamma} \alias{plgamma} \alias{qlgamma} \alias{rlgamma} \alias{mlgamma} \alias{levlgamma} \title{The Loggamma Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Loggamma distribution with parameters \code{shapelog} and \code{ratelog}. } \usage{ dlgamma(x, shapelog, ratelog, log = FALSE) plgamma(q, shapelog, ratelog, lower.tail = TRUE, log.p = FALSE) qlgamma(p, shapelog, ratelog, lower.tail = TRUE, log.p = FALSE) rlgamma(n, shapelog, ratelog) mlgamma(order, shapelog, ratelog) levlgamma(limit, shapelog, ratelog, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shapelog, ratelog}{parameters. 
Must be strictly positive.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The loggamma distribution with parameters \code{shapelog} \eqn{= \alpha}{= a} and \code{ratelog} \eqn{= \lambda}{= b} has density: \deqn{f(x) = \frac{\lambda^\alpha}{\Gamma(\alpha)}% \frac{(\log x)^{\alpha - 1}}{x^{\lambda + 1}}}{% f(x) = (b^a (log(x))^(a - 1))/(Gamma(a) * x^(b + 1))} for \eqn{x > 1}, \eqn{\alpha > 0}{a > 0} and \eqn{\lambda > 0}{b > 0}. (Here \eqn{\Gamma(\alpha)}{Gamma(a)} is the function implemented by \R's \code{\link{gamma}()} and defined in its help.) The loggamma is the distribution of the random variable \eqn{e^X}{exp(X)}, where \eqn{X} has a gamma distribution with shape parameter \eqn{\alpha}{a} and scale parameter \eqn{1/\lambda}{1/b}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k < \lambda}{k < ratelog}. } \value{ \code{dlgamma} gives the density, \code{plgamma} gives the distribution function, \code{qlgamma} gives the quantile function, \code{rlgamma} generates random deviates, \code{mlgamma} gives the \eqn{k}th raw moment, and \code{levlgamma} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Hogg, R. V. and Klugman, S. A. (1984), \emph{Loss Distributions}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dlgamma(2, 3, 4, log = TRUE)) p <- (1:10)/10 plgamma(qlgamma(p, 2, 3), 2, 3) mlgamma(2, 3, 4) - mlgamma(1, 3, 4)^2 levlgamma(10, 3, 4, order = 2) } \keyword{distribution} actuar/man/ZeroTruncatedNegativeBinomial.Rd0000644000176200001440000001155215147745722020544 0ustar liggesusers\name{ZeroTruncatedNegativeBinomial} \alias{ZeroTruncatedNegativeBinomial} \alias{ZTNegativeBinomial} \alias{ZTNegBinomial} \alias{dztnbinom} \alias{pztnbinom} \alias{qztnbinom} \alias{rztnbinom} \title{The Zero-Truncated Negative Binomial Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Truncated Negative Binomial distribution with parameters \code{size} and \code{prob}. } \usage{ dztnbinom(x, size, prob, log = FALSE) pztnbinom(q, size, prob, lower.tail = TRUE, log.p = FALSE) qztnbinom(p, size, prob, lower.tail = TRUE, log.p = FALSE) rztnbinom(n, size, prob) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{size}{target for number of successful trials, or dispersion parameter. Must be positive, need not be integer.} \item{prob}{parameter. \code{0 < prob <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-truncated negative binomial distribution with \code{size} \eqn{= r} and \code{prob} \eqn{= p} has probability mass function \deqn{% p(x) = \frac{\Gamma(x + r) p^r (1 - p)^x}{\Gamma(r) x! (1 - p^r)}}{% p(x) = [\Gamma(x+r) p^r (1-p)^x]/[\Gamma(r) x! 
(1-p^r)]} for \eqn{x = 1, 2, \ldots}, \eqn{r \ge 0} and \eqn{0 < p < 1}, and \eqn{p(1) = 1} when \eqn{p = 1}. The cumulative distribution function is \deqn{P(x) = \frac{F(x) - F(0)}{1 - F(0)},}{% P(x) = [F(x) - F(0)]/[1 - F(0)],} where \eqn{F(x)} is the distribution function of the standard negative binomial. The mean is \eqn{r(1-p)/(p(1-p^r))} and the variance is \eqn{[r(1-p)(1 - (1 + r(1-p))p^r)]/[p(1-p^r)]^2}. In the terminology of Klugman et al. (2012), the zero-truncated negative binomial is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = 1-p} and \eqn{b = (r-1)(1-p)}. The limiting case \code{size == 0} is the \link[=Logarithmic]{logarithmic} distribution with parameter \code{1 - prob}. Unlike the standard negative binomial functions, parametrization through the mean \code{mu} is not supported to avoid ambiguity as to whether \code{mu} is the mean of the underlying negative binomial or the mean of the zero-truncated distribution. If an element of \code{x} is not integer, the result of \code{dztnbinom} is zero, with a warning. The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dztnbinom} gives the (log) probability mass function, \code{pztnbinom} gives the (log) distribution function, \code{qztnbinom} gives the quantile function, and \code{rztnbinom} generates random deviates. Invalid \code{size} or \code{prob} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rztnbinom}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}ztnbinom} use \code{\{d,p,q\}nbinom} for all but the trivial input values and \eqn{p(0)}. \code{rztnbinom} uses the simple inversion algorithm suggested by Peter Dalgaard on the r-help mailing list on 1 May 2005 % (\url{https://stat.ethz.ch/pipermail/r-help/2005-May/070680.html}). 
} \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dnbinom}} for the negative binomial distribution. \code{\link{dztgeom}} for the zero-truncated geometric and \code{\link{dlogarithmic}} for the logarithmic, which are special cases of the zero-truncated negative binomial. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ ## Example 6.3 of Klugman et al. (2012) p <- 1/(1 + 0.5) dztnbinom(c(1, 2, 3), size = 2.5, prob = p) dnbinom(c(1, 2, 3), 2.5, p)/pnbinom(0, 2.5, p, lower = FALSE) # same pztnbinom(1, 2, prob = 1) # point mass at 1 dztnbinom(2, size = 1, 0.25) # == dztgeom(2, 0.25) dztnbinom(2, size = 0, 0.25) # == dlogarithmic(2, 0.75) qztnbinom(pztnbinom(1:10, 2.5, 0.3), 2.5, 0.3) x <- rztnbinom(1000, size = 2.5, prob = 0.4) y <- sort(unique(x)) plot(y, table(x)/length(x), type = "h", lwd = 2, pch = 19, col = "black", xlab = "x", ylab = "p(x)", main = "Empirical vs theoretical probabilities") points(y, dztnbinom(y, size = 2.5, prob = 0.4), pch = 19, col = "red") legend("topright", c("empirical", "theoretical"), lty = c(1, NA), lwd = 2, pch = c(NA, 19), col = c("black", "red")) } \keyword{distribution} actuar/man/SingleParameterPareto.Rd0000644000176200001440000000643015147745722017051 0ustar liggesusers\name{SingleParameterPareto} \alias{SingleParameterPareto} \alias{dpareto1} \alias{ppareto1} \alias{qpareto1} \alias{rpareto1} \alias{mpareto1} \alias{levpareto1} \title{The Single-parameter Pareto Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments, and limited moments for the Single-parameter Pareto distribution with parameter \code{shape}. 
} \usage{ dpareto1(x, shape, min, log = FALSE) ppareto1(q, shape, min, lower.tail = TRUE, log.p = FALSE) qpareto1(p, shape, min, lower.tail = TRUE, log.p = FALSE) rpareto1(n, shape, min) mpareto1(order, shape, min) levpareto1(limit, shape, min, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape}{parameter. Must be strictly positive.} \item{min}{lower bound of the support of the distribution.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The single-parameter Pareto, or Pareto I, distribution with parameter \code{shape} \eqn{= \alpha}{= a} has density: \deqn{f(x) = \frac{\alpha \theta^\alpha}{x^{\alpha + 1}}}{% f(x) = a b^a/x^(a + 1)} for \eqn{x > \theta}{x > b}, \eqn{\alpha > 0}{a > 0} and \eqn{\theta > 0}{b > 0}. Although there appears to be two parameters, only \code{shape} is a true parameter. The value of \code{min} \eqn{= \theta}{= b} must be set in advance. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{k < \alpha}{k < shape} and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{x \ge \theta}{x \ge min}. } \value{ \code{dpareto1} gives the density, \code{ppareto1} gives the distribution function, \code{qpareto1} gives the quantile function, \code{rpareto1} generates random deviates, \code{mpareto1} gives the \eqn{k}th raw moment, and \code{levpareto1} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. 
} \note{ For Pareto distributions, we use the classification of Arnold (2015) with the parametrization of Klugman et al. (2012). The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Arnold, B.C. (2015), \emph{Pareto Distributions}, Second Edition, CRC Press. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dpareto}} for the two-parameter Pareto distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dpareto1(5, 3, 4, log = TRUE)) p <- (1:10)/10 ppareto1(qpareto1(p, 2, 3), 2, 3) mpareto1(2, 3, 4) - mpareto1(1, 3, 4) ^ 2 levpareto1(10, 3, 4, order = 2) } \keyword{distribution} actuar/man/WeibullMoments.Rd0000644000176200001440000000263215147745722015562 0ustar liggesusers\name{WeibullMoments} \alias{WeibullMoments} \alias{mweibull} \alias{levweibull} \title{Raw and Limited Moments of the Weibull Distribution} \description{ Raw moments and limited moments for the Weibull distribution with parameters \code{shape} and \code{scale}. } \usage{ mweibull(order, shape, scale = 1) levweibull(limit, shape, scale = 1, order = 1) } \arguments{ \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{shape, scale}{shape and scale parameters, the latter defaulting to 1.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]} and the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\tau}{k > -shape}. } \value{ \code{mweibull} gives the \eqn{k}th raw moment and \code{levweibull} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. 
} \seealso{ \code{\link{Weibull}} for details on the Weibull distribution and functions \code{[dpqr]weibull}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ mweibull(2, 3, 4) - mweibull(1, 3, 4)^2 levweibull(10, 3, 4, order = 2) } \keyword{distribution} actuar/man/ZeroModifiedNegativeBinomial.Rd0000644000176200001440000001156115147745722020333 0ustar liggesusers\name{ZeroModifiedNegativeBinomial} \alias{ZeroModifiedNegativeBinomial} \alias{ZMNegativeBinomial} \alias{ZMNegBinomial} \alias{dzmnbinom} \alias{pzmnbinom} \alias{qzmnbinom} \alias{rzmnbinom} \title{The Zero-Modified Negative Binomial Distribution} \description{ Density function, distribution function, quantile function and random generation for the Zero-Modified Negative Binomial distribution with parameters \code{size} and \code{prob}, and arbitrary probability at zero \code{p0}. } \usage{ dzmnbinom(x, size, prob, p0, log = FALSE) pzmnbinom(q, size, prob, p0, lower.tail = TRUE, log.p = FALSE) qzmnbinom(p, size, prob, p0, lower.tail = TRUE, log.p = FALSE) rzmnbinom(n, size, prob, p0) } \arguments{ \item{x}{vector of (strictly positive integer) quantiles.} \item{q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{size}{target for number of successful trials, or dispersion parameter. Must be positive, need not be integer.} \item{prob}{parameter. \code{0 < prob <= 1}.} \item{p0}{probability mass at zero. 
\code{0 <= p0 <= 1}.} \item{log, log.p}{logical; if \code{TRUE}, probabilities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}, otherwise, \eqn{P[X > x]}.} } \details{ The zero-modified negative binomial distribution with \code{size} \eqn{= r}, \code{prob} \eqn{= p} and \code{p0} \eqn{= p_0}{= p0} is a discrete mixture between a degenerate distribution at zero and a (standard) negative binomial. The probability mass function is \eqn{p(0) = p_0}{p(0) = p0} and \deqn{% p(x) = \frac{(1-p_0)}{(1-p^r)} f(x)}{% p(x) = (1-p0)/(1-p^r) f(x)} for \eqn{x = 1, 2, \ldots}, \eqn{r \ge 0}, \eqn{0 < p < 1} and \eqn{0 \le p_0 \le 1}{0 \le p0 \le 1}, where \eqn{f(x)} is the probability mass function of the negative binomial. The cumulative distribution function is \deqn{P(x) = p_0 + (1 - p_0) \left(\frac{F(x) - F(0)}{1 - F(0)}\right)}{% P(x) = p0 + (1 - p0) [F(x) - F(0)]/[1 - F(0)].} The mean is \eqn{(1-p_0) \mu}{(1-p0)m} and the variance is \eqn{(1-p_0) \sigma^2 + p_0(1-p_0) \mu^2}{(1-p0)v + p0(1-p0)m^2}, where \eqn{\mu}{m} and \eqn{\sigma^2}{v} are the mean and variance of the zero-truncated negative binomial. In the terminology of Klugman et al. (2012), the zero-modified negative binomial is a member of the \eqn{(a, b, 1)} class of distributions with \eqn{a = 1-p} and \eqn{b = (r-1)(1-p)}. The special case \code{p0 == 0} is the zero-truncated negative binomial. The limiting case \code{size == 0} is the zero-modified logarithmic distribution with parameters \code{1 - prob} and \code{p0}. Unlike the standard negative binomial functions, parametrization through the mean \code{mu} is not supported to avoid ambiguity as to whether \code{mu} is the mean of the underlying negative binomial or the mean of the zero-modified distribution. If an element of \code{x} is not integer, the result of \code{dzmnbinom} is zero, with a warning. 
The quantile is defined as the smallest value \eqn{x} such that \eqn{P(x) \ge p}, where \eqn{P} is the distribution function. } \value{ \code{dzmnbinom} gives the (log) probability mass function, \code{pzmnbinom} gives the (log) distribution function, \code{qzmnbinom} gives the quantile function, and \code{rzmnbinom} generates random deviates. Invalid \code{size}, \code{prob} or \code{p0} will result in return value \code{NaN}, with a warning. The length of the result is determined by \code{n} for \code{rzmnbinom}, and is the maximum of the lengths of the numerical arguments for the other functions. } \note{ Functions \code{\{d,p,q\}zmnbinom} use \code{\{d,p,q\}nbinom} for all but the trivial input values and \eqn{p(0)}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. } \seealso{ \code{\link{dnbinom}} for the negative binomial distribution. \code{\link{dztnbinom}} for the zero-truncated negative binomial distribution. \code{\link{dzmgeom}} for the zero-modified geometric and \code{\link{dzmlogarithmic}} for the zero-modified logarithmic, which are special cases of the zero-modified negative binomial. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ ## Example 6.3 of Klugman et al. 
(2012) p <- 1/(1 + 0.5) dzmnbinom(1:5, size = 2.5, prob = p, p0 = 0.6) (1-0.6) * dnbinom(1:5, 2.5, p)/pnbinom(0, 2.5, p, lower = FALSE) # same ## simple relation between survival functions pzmnbinom(0:5, 2.5, p, p0 = 0.2, lower = FALSE) (1-0.2) * pnbinom(0:5, 2.5, p, lower = FALSE) / pnbinom(0, 2.5, p, lower = FALSE) # same qzmnbinom(pzmnbinom(0:10, 2.5, 0.3, p0 = 0.1), 2.5, 0.3, p0 = 0.1) } \keyword{distribution} actuar/man/InverseGaussian.Rd0000644000176200001440000001333715151206331015705 0ustar liggesusers\name{InverseGaussian} \alias{InverseGaussian} \alias{dinvgauss} \alias{pinvgauss} \alias{qinvgauss} \alias{rinvgauss} \alias{minvgauss} \alias{levinvgauss} \alias{mgfinvgauss} \title{The Inverse Gaussian Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments, limited moments and moment generating function for the Inverse Gaussian distribution with parameters \code{mean} and \code{shape}. } \usage{ dinvgauss(x, mean, shape = 1, dispersion = 1/shape, log = FALSE) pinvgauss(q, mean, shape = 1, dispersion = 1/shape, lower.tail = TRUE, log.p = FALSE) qinvgauss(p, mean, shape = 1, dispersion = 1/shape, lower.tail = TRUE, log.p = FALSE, tol = 1e-14, maxit = 100, echo = FALSE, trace = echo) rinvgauss(n, mean, shape = 1, dispersion = 1/shape) minvgauss(order, mean, shape = 1, dispersion = 1/shape) levinvgauss(limit, mean, shape = 1, dispersion = 1/shape, order = 1) mgfinvgauss(t, mean, shape = 1, dispersion = 1/shape, log = FALSE) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{mean, shape}{parameters. Must be strictly positive. 
Infinite values are supported.} \item{dispersion}{an alternative way to specify the shape.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment. Only \code{order = 1} is supported by \code{levinvgauss}.} \item{limit}{limit of the loss variable.} \item{tol}{small positive value. Tolerance to assess convergence in the Newton computation of quantiles.} \item{maxit}{positive integer; maximum number of recursions in the Newton computation of quantiles.} \item{echo, trace}{logical; echo the recursions to screen in the Newton computation of quantiles.} \item{t}{numeric vector.} } \details{ The inverse Gaussian distribution with parameters \code{mean} \eqn{= \mu} and \code{dispersion} \eqn{= \phi} has density: \deqn{f(x) = \left( \frac{1}{2 \pi \phi x^3} \right)^{1/2} \exp\left( -\frac{(x - \mu)^2}{2 \mu^2 \phi x} \right),}{% f(x) = sqrt(1/(2 \pi \phi x^3)) * exp(-((x - \mu)^2)/(2 \mu^2 \phi x)),} for \eqn{x \ge 0}, \eqn{\mu > 0} and \eqn{\phi > 0}. The limiting case \eqn{\mu = \infty}{\mu = Inf} is an inverse chi-squared distribution (or inverse gamma with \code{shape} \eqn{= 1/2} and \code{rate} \eqn{= 2}\code{phi}). This distribution has no finite strictly positive, integer moments. The limiting case \eqn{\phi = 0} is an infinite spike in \eqn{x = 0}. If the random variable \eqn{X} is IG\eqn{(\mu, \phi)}, then \eqn{X/\mu} is IG\eqn{(1, \phi \mu)}{(1, \phi * \mu)}. The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{k = 1, 2, \dots}, the limited expected value at some limit \eqn{d} is \eqn{E[\min(X, d)]}{E[min(X, d)]} and the moment generating function is \eqn{E[e^{tX}]}. The moment generating function of the inverse Gaussian is defined for \code{t <= 1/(2 * mean^2 * phi)}. 
} \value{ \code{dinvgauss} gives the density, \code{pinvgauss} gives the distribution function, \code{qinvgauss} gives the quantile function, \code{rinvgauss} generates random deviates, \code{minvgauss} gives the \eqn{k}th raw moment, \code{levinvgauss} gives the limited expected value, and \code{mgfinvgauss} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ Functions \code{dinvgauss}, \code{pinvgauss} and \code{qinvgauss} are C implementations of functions of the same name in package \pkg{statmod}; see Giner and Smyth (2016). Devroye (1986, chapter 4) provides a nice presentation of the algorithm to generate random variates from an inverse Gaussian distribution. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Giner, G. and Smyth, G. K. (2016), \dQuote{\pkg{statmod}: Probability Calculations for the Inverse Gaussian Distribution}, \emph{The R Journal}, vol. 8, no 1, p. 339-351. \doi{10.32614/RJ-2016-024} Chhikara, R. S. and Folk, T. L. (1989), \emph{The Inverse Gaussian Distribution: Theory, Methodology and Applications}, Decker. Devroye, L. (1986), \emph{Non-Uniform Random Variate Generation}, Springer-Verlag. \url{https://luc.devroye.org/rnbookindex.html} } \seealso{ \code{\link{dinvgamma}} for the inverse gamma distribution. } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ dinvgauss(c(-1, 0, 1, 2, Inf), mean = 1.5, dis = 0.7) dinvgauss(c(-1, 0, 1, 2, Inf), mean = Inf, dis = 0.7) dinvgauss(c(-1, 0, 1, 2, Inf), mean = 1.5, dis = Inf) # spike at zero ## Typical graphical representations of the inverse Gaussian ## distribution. First fixed mean and varying shape; second ## varying mean and fixed shape. 
col = c("red", "blue", "green", "cyan", "yellow", "black") par = c(0.125, 0.5, 1, 2, 8, 32) curve(dinvgauss(x, 1, par[1]), from = 0, to = 2, col = col[1]) for (i in 2:6) curve(dinvgauss(x, 1, par[i]), add = TRUE, col = col[i]) curve(dinvgauss(x, par[1], 1), from = 0, to = 2, col = col[1]) for (i in 2:6) curve(dinvgauss(x, par[i], 1), add = TRUE, col = col[i]) pinvgauss(qinvgauss((1:10)/10, 1.5, shape = 2), 1.5, 2) minvgauss(1:4, 1.5, 2) levinvgauss(c(0, 0.5, 1, 1.2, 10, Inf), 1.5, 2) } \keyword{distribution} actuar/man/discretize.Rd0000644000176200001440000001076015147745722014762 0ustar liggesusers\name{discretize} \alias{discretize} \alias{discretise} \title{Discretization of a Continuous Distribution} \description{ Compute a discrete probability mass function from a continuous cumulative distribution function (cdf) with various methods. \code{discretise} is an alias for \code{discretize}. } \usage{ discretize(cdf, from, to, step = 1, method = c("upper", "lower", "rounding", "unbiased"), lev, by = step, xlim = NULL) discretise(cdf, from, to, step = 1, method = c("upper", "lower", "rounding", "unbiased"), lev, by = step, xlim = NULL) } \arguments{ \item{cdf}{an expression written as a function of \code{x}, or alternatively the name of a function, giving the cdf to discretize.} \item{from, to}{the range over which the function will be discretized.} \item{step}{numeric; the discretization step (or span, or lag).} \item{method}{discretization method to use.} \item{lev}{an expression written as a function of \code{x}, or alternatively the name of a function, to compute the limited expected value of the distribution corresponding to \code{cdf}. Used only with the \code{"unbiased"} method.} \item{by}{an alias for \code{step}.} \item{xlim}{numeric of length 2; if specified, it serves as default for \code{c(from, to)}.} } \details{ Usage is similar to \code{\link{curve}}. 
\code{discretize} returns the probability mass function (pmf) of the random variable obtained by discretization of the cdf specified in \code{cdf}. Let \eqn{F(x)} denote the cdf, \eqn{E[\min(X, x)]}{E[min(X, x)]} the limited expected value at \eqn{x}, \eqn{h} the step, \eqn{p_x}{p[x]} the probability mass at \eqn{x} in the discretized distribution and set \eqn{a =} \code{from} and \eqn{b =} \code{to}. Method \code{"upper"} is the forward difference of the cdf \eqn{F}: \deqn{p_x = F(x + h) - F(x)}{p[x] = F(x + h) - F(x)} for \eqn{x = a, a + h, \dots, b - h}. Method \code{"lower"} is the backward difference of the cdf \eqn{F}: \deqn{p_x = F(x) - F(x - h)}{p[x] = F(x) - F(x - h)} for \eqn{x = a + h, \dots, b} and \eqn{p_a = F(a)}{p[a] = F(a)}. Method \code{"rounding"} has the true cdf pass through the midpoints of the intervals \eqn{[x - h/2, x + h/2)}: \deqn{p_x = F(x + h/2) - F(x - h/2)}{p[x] = F(x + h/2) - F(x - h/2)} for \eqn{x = a + h, \dots, b - h} and \eqn{p_a = F(a + h/2)}{p[a] = F(a + h/2)}. The function assumes the cdf is continuous. Any adjustment necessary for discrete distributions can be done via \code{cdf}. Method \code{"unbiased"} matches the first moment of the discretized and the true distributions. The probabilities are as follows: \deqn{p_a = \frac{E[\min(X, a)] - E[\min(X, a + h)]}{h} + 1 - F(a)}{% p[a] = (E[min(X, a)] - E[min(X, a + h)])/h + 1 - F(a)} \deqn{p_x = \frac{2 E[\min(X, x)] - E[\min(X, x - h)] - E[\min(X, x + h)]}{h}, \quad a < x < b}{% p[x] = (2 E[min(X, x)] - E[min(X, x - h)] - E[min(X, x + h)])/h, a < x < b} \deqn{p_b = \frac{E[\min(X, b)] - E[\min(X, b - h)]}{h} - 1 + F(b),}{% p[b] = (E[min(X, b)] - E[min(X, b - h)])/h - 1 + F(b).} } \value{ A numeric vector of probabilities suitable for use in \code{\link{aggregateDist}}. } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \seealso{ \code{\link{aggregateDist}} } \examples{ x <- seq(0, 5, 0.5) op <- par(mfrow = c(1, 1), col = "black") ## Upper and lower discretization fu <- discretize(pgamma(x, 1), method = "upper", from = 0, to = 5, step = 0.5) fl <- discretize(pgamma(x, 1), method = "lower", from = 0, to = 5, step = 0.5) curve(pgamma(x, 1), xlim = c(0, 5)) par(col = "blue") plot(stepfun(head(x, -1), diffinv(fu)), pch = 19, add = TRUE) par(col = "green") plot(stepfun(x, diffinv(fl)), pch = 19, add = TRUE) par(col = "black") ## Rounding (or midpoint) discretization fr <- discretize(pgamma(x, 1), method = "rounding", from = 0, to = 5, step = 0.5) curve(pgamma(x, 1), xlim = c(0, 5)) par(col = "blue") plot(stepfun(head(x, -1), diffinv(fr)), pch = 19, add = TRUE) par(col = "black") ## First moment matching fb <- discretize(pgamma(x, 1), method = "unbiased", lev = levgamma(x, 1), from = 0, to = 5, step = 0.5) curve(pgamma(x, 1), xlim = c(0, 5)) par(col = "blue") plot(stepfun(x, diffinv(fb)), pch = 19, add = TRUE) par(op) } \keyword{distribution} \keyword{models} actuar/man/ChisqSupp.Rd0000644000176200001440000000401515147745722014530 0ustar liggesusers\name{ChisqSupp} \alias{ChisqSupp} \alias{mchisq} \alias{levchisq} \alias{mgfchisq} \title{Moments and Moment Generating Function of the (non-central) Chi-Squared Distribution} \description{ Raw moments, limited moments and moment generating function for the chi-squared (\eqn{\chi^2}{chi^2}) distribution with \code{df} degrees of freedom and optional non-centrality parameter \code{ncp}. 
} \usage{ mchisq(order, df, ncp = 0) levchisq(limit, df, ncp = 0, order = 1) mgfchisq(t, df, ncp = 0, log = FALSE) } \arguments{ \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} \item{df}{degrees of freedom (non-negative, but can be non-integer).} \item{ncp}{non-centrality parameter (non-negative).} \item{t}{numeric vector.} \item{log}{logical; if \code{TRUE}, the cumulant generating function is returned.} } \details{ The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, the \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]} and the moment generating function is \eqn{E[e^{tX}]}. Only integer moments are supported for the non central Chi-square distribution (\code{ncp > 0}). The limited expected value is supported for the centered Chi-square distribution (\code{ncp = 0}). } \value{ \code{mchisq} gives the \eqn{k}th raw moment, \code{levchisq} gives the \eqn{k}th moment of the limited loss variable, and \code{mgfchisq} gives the moment generating function in \code{t}. Invalid arguments will result in return value \code{NaN}, with a warning. } \seealso{ \code{\link[stats]{Chisquare}} } \references{ Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. Johnson, N. L. and Kotz, S. (1970), \emph{Continuous Univariate Distributions, Volume 1}, Wiley. } \author{ Christophe Dutang, Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \examples{ mchisq(2, 3, 4) levchisq(10, 3, order = 2) mgfchisq(0.25, 3, 2) } \keyword{distribution} actuar/man/rcompound.Rd0000644000176200001440000000654115151206331014604 0ustar liggesusers\name{rcompound} \alias{rcompound} \alias{rcomppois} \title{Simulation from Compound Models} \description{ \code{rcompound} generates random variates from a compound model. \code{rcomppois} is a simplified version for a common case. 
} \usage{ rcompound(n, model.freq, model.sev, SIMPLIFY = TRUE) rcomppois(n, lambda, model.sev, SIMPLIFY = TRUE)} \arguments{ \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{model.freq, model.sev}{expressions specifying the frequency and severity simulation models with the number of variates omitted; see Details.} \item{lambda}{Poisson parameter.} \item{SIMPLIFY}{boolean; if \code{FALSE} the frequency and severity variates are returned along with the aggregate variates.} } \details{ \code{rcompound} generates variates from a random variable of the form \deqn{S = X_1 + ... + X_N,} where \eqn{N} is the frequency random variable and \eqn{X_1, X_2, \dots} are the severity random variables. The latter are mutually independent, identically distributed and independent from \eqn{N}. \code{model.freq} and \code{model.sev} specify the simulation models for the frequency and the severity random variables, respectively. A model is a complete call to a random number generation function, with the number of variates omitted. This is similar to \code{\link{rcomphierarc}}, but the calls need not be wrapped into \code{\link{expression}}. Either argument may also be the name of an object containing an expression, in which case the object will be evaluated in the parent frame to retrieve the expression. The argument of the random number generation functions for the number of variates to simulate \strong{must} be named \code{n}. \code{rcomppois} generates variates from the common Compound Poisson model, that is when random variable \eqn{N} is Poisson distributed with mean \code{lambda}. } \value{ When \code{SIMPLIFY = TRUE}, a vector of aggregate amounts \eqn{S_1, \dots, S_n}. 
When \code{SIMPLIFY = FALSE}, a list of three elements: \item{\code{aggregate}}{vector of aggregate amounts \eqn{S_1, \dots, S_n};} \item{\code{frequency}}{vector of frequencies \eqn{N_1, \dots, N_n};} \item{\code{severity}}{vector of severities \eqn{X_1, X_2, \dots}.} } \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} } \seealso{ \code{\link{rcomphierarc}} to simulate from compound hierarchical models. } \examples{ ## Compound Poisson model with gamma severity. rcompound(10, rpois(2), rgamma(2, 3)) rcomppois(10, 2, rgamma(2, 3)) # same ## Frequencies and individual claim amounts along with aggregate ## values. rcomppois(10, 2, rgamma(2, 3), SIMPLIFY = FALSE) ## Wrapping the simulation models into expression() is allowed, but ## not needed. rcompound(10, expression(rpois(2)), expression(rgamma(2, 3))) \dontrun{## Speed comparison between rcompound() and rcomphierarc(). ## [Also note the simpler syntax for rcompound().] system.time(rcompound(1e6, rpois(2), rgamma(2, 3))) system.time(rcomphierarc(1e6, expression(rpois(2)), expression(rgamma(2, 3))))} ## The severity can itself be a compound model. It makes sense ## in such a case to use a zero-truncated frequency distribution ## for the second level model. rcomppois(10, 2, rcompound(rztnbinom(1.5, 0.7), rlnorm(1.2, 1))) } \keyword{datagen} actuar/man/InverseBurr.Rd0000644000176200001440000001052015147745722015055 0ustar liggesusers\name{InverseBurr} \alias{InverseBurr} \alias{dinvburr} \alias{pinvburr} \alias{qinvburr} \alias{rinvburr} \alias{minvburr} \alias{levinvburr} \title{The Inverse Burr Distribution} \description{ Density function, distribution function, quantile function, random generation, raw moments and limited moments for the Inverse Burr distribution with parameters \code{shape1}, \code{shape2} and \code{scale}. 
} \usage{ dinvburr(x, shape1, shape2, rate = 1, scale = 1/rate, log = FALSE) pinvburr(q, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) qinvburr(p, shape1, shape2, rate = 1, scale = 1/rate, lower.tail = TRUE, log.p = FALSE) rinvburr(n, shape1, shape2, rate = 1, scale = 1/rate) minvburr(order, shape1, shape2, rate = 1, scale = 1/rate) levinvburr(limit, shape1, shape2, rate = 1, scale = 1/rate, order = 1) } \arguments{ \item{x, q}{vector of quantiles.} \item{p}{vector of probabilities.} \item{n}{number of observations. If \code{length(n) > 1}, the length is taken to be the number required.} \item{shape1, shape2, scale}{parameters. Must be strictly positive.} \item{rate}{an alternative way to specify the scale.} \item{log, log.p}{logical; if \code{TRUE}, probabilities/densities \eqn{p} are returned as \eqn{\log(p)}{log(p)}.} \item{lower.tail}{logical; if \code{TRUE} (default), probabilities are \eqn{P[X \le x]}{P[X <= x]}, otherwise, \eqn{P[X > x]}.} \item{order}{order of the moment.} \item{limit}{limit of the loss variable.} } \details{ The inverse Burr distribution with parameters \code{shape1} \eqn{= \tau}{= a}, \code{shape2} \eqn{= \gamma}{= b} and \code{scale} \eqn{= \theta}{= s}, has density: \deqn{f(x) = \frac{\tau \gamma (x/\theta)^{\gamma \tau}}{% x [1 + (x/\theta)^\gamma]^{\tau + 1}}}{% f(x) = a b (x/s)^(ba)/(x [1 + (x/s)^b]^(a + 1))} for \eqn{x > 0}, \eqn{\tau > 0}{a > 0}, \eqn{\gamma > 0}{b > 0} and \eqn{\theta > 0}{s > 0}. The inverse Burr is the distribution of the random variable \deqn{\theta \left(\frac{X}{1 - X}\right)^{1/\gamma},}{% s (X/(1 - X))^(1/b),} where \eqn{X} has a beta distribution with parameters \eqn{\tau}{a} and \eqn{1}. 
The inverse Burr distribution has the following special cases: \itemize{ \item A \link[=dllogis]{Loglogistic} distribution when \code{shape1 == 1}; \item An \link[=dinvpareto]{Inverse Pareto} distribution when \code{shape2 == 1}; \item An \link[=dinvparalogis]{Inverse Paralogistic} distribution when \code{shape1 == shape2}. } The \eqn{k}th raw moment of the random variable \eqn{X} is \eqn{E[X^k]}{E[X^k]}, \eqn{-\tau\gamma < k < \gamma}{-shape1 * shape2 < k < shape2}. The \eqn{k}th limited moment at some limit \eqn{d} is \eqn{E[\min(X, d)^k]}{E[min(X, d)^k]}, \eqn{k > -\tau\gamma}{k > -shape1 * shape2} and \eqn{1 - k/\gamma}{1 - k/shape2} not a negative integer. } \value{ \code{dinvburr} gives the density, \code{pinvburr} gives the distribution function, \code{qinvburr} gives the quantile function, \code{rinvburr} generates random deviates, \code{minvburr} gives the \eqn{k}th raw moment, and \code{levinvburr} gives the \eqn{k}th moment of the limited loss variable. Invalid arguments will result in return value \code{NaN}, with a warning. } \note{ \code{levinvburr} computes the limited expected value using \code{\link{betaint}}. Also known as the Dagum distribution. See also Kleiber and Kotz (2003) for alternative names and parametrizations. The \code{"distributions"} package vignette provides the interrelations between the continuous size distributions in \pkg{actuar} and the complete formulas underlying the above functions. } \references{ Kleiber, C. and Kotz, S. (2003), \emph{Statistical Size Distributions in Economics and Actuarial Sciences}, Wiley. Klugman, S. A., Panjer, H. H. and Willmot, G. E. (2012), \emph{Loss Models, From Data to Decisions, Fourth Edition}, Wiley. 
} \author{ Vincent Goulet \email{vincent.goulet@act.ulaval.ca} and Mathieu Pigeon } \examples{ exp(dinvburr(2, 2, 3, 1, log = TRUE)) p <- (1:10)/10 pinvburr(qinvburr(p, 2, 3, 1), 2, 3, 1) ## variance minvburr(2, 2, 3, 1) - minvburr(1, 2, 3, 1) ^ 2 ## case with 1 - order/shape2 > 0 levinvburr(10, 2, 3, 1, order = 2) ## case with 1 - order/shape2 < 0 levinvburr(10, 2, 1.5, 1, order = 2) } \keyword{distribution} actuar/DESCRIPTION0000644000176200001440000000655215151421061013243 0ustar liggesusersPackage: actuar Type: Package Title: Actuarial Functions and Heavy Tailed Distributions Version: 3.3-7 Date: 2026-03-02 Authors@R: c(person("Vincent", "Goulet", role = c("cre", "aut"), email = "vincent.goulet@act.ulaval.ca", comment = c(ORCID = "0000-0002-9315-5719")), person("Sébastien", "Auclair", role = "ctb"), person("Jérémy", "Déraspe", role = "ctb"), person("Christophe", "Dutang", role = "aut", email = "dutang@ceremade.dauphine.fr", comment = c(ORCID = "0000-0001-6732-1501")), person("Walter", "Garcia-Fontes", role = "ctb", email = "walter.garcia@upf.edu"), person("Nicholas", "Langevin", role = "ctb"), person("Xavier", "Milhaud", role = "ctb"), person("Tommy", "Ouellet", role = "ctb"), person("Alexandre", "Parent", role = "ctb"), person("Mathieu", "Pigeon", role = "aut", email = "pigeon.mathieu.2@uqam.ca"), person("Louis-Philippe", "Pouliot", role = "ctb"), person("Jeffrey A.", "Ryan", role = "aut", email = "jeff.a.ryan@gmail.com", comment = "Package API"), person("Robert", "Gentleman", role = "aut", comment = "Parts of the R to C interface"), person("Ross", "Ihaka", role = "aut", comment = "Parts of the R to C interface"), person(given = "R Core Team", role = "aut", comment = "Parts of the R to C interface"), person(given = "R Foundation", role = "aut", comment = "Parts of the R to C interface")) Description: Functions and data sets for actuarial science: modeling of loss distributions; risk theory and ruin theory; simulation of compound models, discrete mixtures and 
compound hierarchical models; credibility theory. Support for many additional probability distributions to model insurance loss size and frequency: 23 continuous heavy tailed distributions; the Poisson-inverse Gaussian discrete distribution; zero-truncated and zero-modified extensions of the standard discrete distributions. Support for phase-type distributions commonly used to compute ruin probabilities. Main reference: . Implementation of the Feller-Pareto family of distributions: . Depends: R (>= 4.1.0) Imports: stats, graphics, expint LinkingTo: expint Suggests: MASS License: GPL (>= 2) URL: https://gitlab.com/vigou3/actuar BugReports: https://gitlab.com/vigou3/actuar/-/issues Encoding: UTF-8 LazyData: yes Classification/MSC-2010: 62P05, 91B30, 62G32 NeedsCompilation: yes Packaged: 2026-03-02 22:54:40 UTC; vincent Author: Vincent Goulet [cre, aut] (ORCID: ), Sébastien Auclair [ctb], Jérémy Déraspe [ctb], Christophe Dutang [aut] (ORCID: ), Walter Garcia-Fontes [ctb], Nicholas Langevin [ctb], Xavier Milhaud [ctb], Tommy Ouellet [ctb], Alexandre Parent [ctb], Mathieu Pigeon [aut], Louis-Philippe Pouliot [ctb], Jeffrey A. Ryan [aut] (Package API), Robert Gentleman [aut] (Parts of the R to C interface), Ross Ihaka [aut] (Parts of the R to C interface), R Core Team [aut] (Parts of the R to C interface), R Foundation [aut] (Parts of the R to C interface) Maintainer: Vincent Goulet Repository: CRAN Date/Publication: 2026-03-02 23:50:09 UTC