lhs/tests/testthat/helper-lhs.R
# Copyright 2019 Robert Carnell
checkLatinHypercube <- function(X)
{
  if (any(apply(X, 2, min) <= 0))
    return(FALSE)
  if (any(apply(X, 2, max) >= 1))
    return(FALSE)
  if (any(is.na(X)))
    return(FALSE)
  # check that the matrix is a Latin hypercube
  g <- function(Y)
  {
    # check that this column has exactly one point in each of the
    # length(Y) equally spaced bins on (0, 1)
    breakpoints <- seq(0, 1, length = length(Y) + 1)
    h <- hist(Y, plot = FALSE, breaks = breakpoints)
    all(h$counts == 1)
  }
  # check all the columns
  return(all(apply(X, 2, g)))
}

checkOA <- function(X)
{
  # check that the columns of the encoded (zero-centered) array are mutually
  # orthogonal, i.e. all off-diagonal entries of t(X) %*% X are essentially zero
  Y <- t(X) %*% X
  all(abs(Y[upper.tri(Y)]) < 1E-9)
}

encodeOA <- function(X, n)
{
  stopifnot(n > 1)
  stopifnot(is.integer(n))
  if (n == 2)
  {
    # 0, 1 => -1, 1
    X <- X*2 - 1
  } else if (n == 3)
  {
    # 0, 1, 2 => -1, 0, 1
    X <- X - 1
  } else if (n == 4)
  {
    # 0, 1, 2, 3 => -1, -1/3, 1/3, 1
    X <- X * 2 / 3 - 1
  } else if (n > 4)
  {
    # 0, 1, ..., n-1 => evenly spaced on [-1, 1]
    X <- X * 2 / (n - 1) - 1
  }
  return(X)
}
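
# Illustrative sketch only (not executed as part of the test suite): how these
# helpers are typically combined.  createBose() and randomLHS() come from the
# lhs package itself.
if (FALSE) {
  A <- lhs::createBose(3, 3, bRandom = FALSE)          # OA(9, 3, 3, 2) with symbols 0, 1, 2
  stopifnot(checkOA(encodeOA(A, 3L)))                  # orthogonal after centering to -1, 0, 1
  stopifnot(checkLatinHypercube(lhs::randomLHS(5, 2)))
}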
lhs/tests/testthat/test-randomlhs.r
# Copyright 2019 Robert Carnell
context("test-randomlhs")
test_that("randomLHS works", {
A <- randomLHS(4,2)
expect_true(all(A > 0 & A < 1))
expect_equal(4, nrow(A))
expect_equal(2, ncol(A))
expect_true(checkLatinHypercube(A))
# doubles are truncated in n and k
A <- randomLHS(4.4, 2)
expect_true(all(A > 0 & A < 1))
expect_equal(4, nrow(A))
expect_equal(2, ncol(A))
expect_true(checkLatinHypercube(A))
A <- randomLHS(4, 2.8)
expect_true(all(A > 0 & A < 1))
expect_equal(4, nrow(A))
expect_equal(2, ncol(A))
expect_true(checkLatinHypercube(A))
A <- randomLHS(4.4, 2.8)
expect_true(all(A > 0 & A < 1))
expect_equal(4, nrow(A))
expect_equal(2, ncol(A))
expect_true(checkLatinHypercube(A))
expect_error(randomLHS(-1, 2))
expect_error(randomLHS(10, -30))
expect_error(randomLHS(NA, 2))
expect_error(randomLHS(NaN, 2))
expect_warning(expect_error(randomLHS(Inf, 2)))
expect_error(randomLHS(10, NA))
expect_error(randomLHS(10, NaN))
expect_warning(expect_error(randomLHS(10, Inf)))
A <- randomLHS(1, 5)
expect_equal(1, nrow(A))
expect_equal(5, ncol(A))
expect_true(checkLatinHypercube(A))
expect_error(randomLHS(c(1,2,3), c(3,4)))
expect_error(randomLHS(-1, 2, preserveDraw = TRUE))
expect_error(randomLHS(10, -30, preserveDraw = TRUE))
expect_error(randomLHS(NA, 2, preserveDraw = TRUE))
expect_error(randomLHS(NaN, 2, preserveDraw = TRUE))
expect_warning(expect_error(randomLHS(Inf, 2, preserveDraw = TRUE)))
expect_error(randomLHS(10, NA, preserveDraw = TRUE))
expect_error(randomLHS(10, NaN, preserveDraw = TRUE))
expect_warning(expect_error(randomLHS(10, Inf, preserveDraw = TRUE)))
A <- randomLHS(4, 2, preserveDraw = TRUE)
expect_true(all(A > 0 & A < 1))
expect_true(checkLatinHypercube(A))
set.seed(4)
A <- randomLHS(5, 3, preserveDraw = TRUE)
set.seed(4)
B <- randomLHS(5, 5, preserveDraw = TRUE)
expect_equal(A, B[,1:3], tolerance = 1E-12)
expect_true(checkLatinHypercube(A))
expect_true(checkLatinHypercube(B))
expect_error(.Call("randomLHS_cpp", 3, 4L, FALSE))
})
test_that("degenerate LHS problem is fixed", {
A <- randomLHS(1, 3)
expect_true(checkLatinHypercube(A))
})
# in version 1.0.1 and earlier, this did not cause a segfault on every attempt;
# the crash occurred somewhat randomly
test_that("Segfault does not occur with gctorture", {
for (i in 1:20)
{
gctorture(TRUE)
A <- randomLHS(10, 4)
gctorture(FALSE)
}
expect_true(checkLatinHypercube(A))
})
lhs/tests/testthat/test-optseededlhs.R
# Copyright 2019 Robert Carnell
context("test-optseededlhs")
test_that("optseededLHS works", {
expect_error(optSeededLHS(randomLHS(10, 4), NA))
expect_error(optSeededLHS(randomLHS(10, 4), NaN))
expect_error(optSeededLHS(randomLHS(10, 4), Inf))
expect_error(optSeededLHS(randomLHS(10, 4), 2, NA))
expect_error(optSeededLHS(randomLHS(10, 4), 2, NaN))
expect_warning(expect_error(optSeededLHS(randomLHS(10, 4), 2, Inf)))
expect_error(optSeededLHS(randomLHS(10, 4), 2, 5, NA))
expect_error(optSeededLHS(randomLHS(10, 4), 2, 5, NaN))
expect_error(optSeededLHS(randomLHS(10, 4), 2, 5, Inf))
temp <- randomLHS(10, 4)
temp[1,1] <- NA
expect_error(optSeededLHS(temp, 5))
temp <- randomLHS(10, 4)
temp[1,1] <- 2
expect_error(optSeededLHS(temp, 5))
set.seed(1976)
A <- optSeededLHS(randomLHS(4, 2), 2)
expect_true(checkLatinHypercube(A))
set.seed(1977)
B <- optSeededLHS(randomLHS(3, 3), 3, 3, .05)
expect_true(checkLatinHypercube(B))
A <- optSeededLHS(randomLHS(10, 4), m = 0)
expect_true(checkLatinHypercube(A))
expect_error(.Call("optSeededLHS_cpp", 3, 4L, 4L, 0.01, matrix(1L, 2, 2), FALSE))
X <- .Call("optSeededLHS_cpp", 1L, 4L, 4L, 0.01, matrix(runif(4), nrow = 1, ncol = 4), FALSE)
expect_equal(nrow(X), 1)
expect_true(checkLatinHypercube(X))
expect_error(.Call("optSeededLHS_cpp", 3L, 4L, 4L, 0.01, matrix(1L, 2, 2), FALSE))
})
lhs/tests/testthat/test-correlatedLHS.R
# Copyright 2024 Robert Carnell
context("test-correlatedLHS")
test_that("Normal Operations", {
temp <- correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x, ...) {
x[,1] <- qunif(x[,1], 3, 6)
return(x)
},
cost_function = function(x, ...) {
(cor(x[,1], x[,2]) - 0.8)^2
})
expect_true(is.numeric(temp$cost))
expect_true(is.matrix(temp$lhs))
expect_true(is.matrix(temp$transformed_lhs))
# debug
expect_output(temp <- correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x, ...) {
x[,1] <- qunif(x[,1], 3, 6)
return(x)
},
cost_function = function(x, ...) {
(cor(x[,1], x[,2]) - 0.8)^2
}, debug = TRUE))
})
test_that("problems", {
# bad marginal_transform_function return
# not a data.frame or matrix
expect_error({
correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x, ...) {
return(NA)
},
cost_function = function(x, ...) {
return(1)
})
})
# wrong dimensions
expect_error({
correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x, ...) {
return(matrix(1, nrow = 2, ncol = 2))
},
cost_function = function(x, ...) {
return(1)
})
})
# bad cost_function return
expect_error({
correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x, ...) {
return(x)
},
cost_function = function(x, ...) {
return("eight")
})
})
# a marginal_transform_function without ... fails when an extra variable is passed in
expect_error({
correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x) {
return(x)
},
cost_function = function(x, ...) {
return(1)
},
test_var = "A")
})
# a cost_function without ... fails when an extra variable is passed in
expect_error({
correlatedLHS(randomLHS(10, 3),
marginal_transform_function = function(x, ...) {
return(x)
},
cost_function = function(x) {
return(1)
},
test_var = "A")
})
# bad lhs
expect_output(
expect_error(
temp <- correlatedLHS(matrix(-1, nrow = 2, ncol = 2),
marginal_transform_function = function(x, ...) {
return(x)
},
cost_function = function(x, ...) {
return(sum(x))
}, debug = TRUE)
)
)
})
lhs/tests/testthat/test-optimumlhs.R
# Copyright 2019 Robert Carnell
context("test-optimumlhs")
test_that("optimumLHS works", {
expect_error(optimumLHS(-1, 2))
expect_error(optimumLHS(10, -30))
expect_error(optimumLHS(10, 2, -2))
expect_error(optimumLHS(10, 2, 3, -1))
expect_error(optimumLHS(10, 2, 3, 1.5))
expect_error(optimumLHS(10, 2, 3, 1))
expect_error(optimumLHS(10, 2, 3, 0))
expect_error(optimumLHS(NA, 2))
expect_error(optimumLHS(NaN, 2))
expect_warning(expect_error(optimumLHS(Inf, 2)))
expect_error(optimumLHS(10, NA))
expect_error(optimumLHS(10, NaN))
expect_warning(expect_error(optimumLHS(10, Inf)))
expect_error(optimumLHS(10, 2, NA))
expect_error(optimumLHS(10, 2, NaN))
expect_warning(expect_error(optimumLHS(10, 2, Inf)))
expect_error(optimumLHS(10, 2, 2, NA))
expect_error(optimumLHS(10, 2, 2, NaN))
expect_error(optimumLHS(10, 2, 2, Inf))
set.seed(1976)
rTemp <- optimumLHS(4, 2)
expect_true(checkLatinHypercube(rTemp))
set.seed(1977)
rTemp <- optimumLHS(3, 3, 5)
expect_true(checkLatinHypercube(rTemp))
set.seed(1978)
rTemp <- optimumLHS(5, 2, 5, .5)
expect_true(checkLatinHypercube(rTemp))
set.seed(2010)
for (i in 2:6)
{
for (j in 2:6)
{
A <- optimumLHS(i, j)
expect_true(checkLatinHypercube(A))
}
}
set.seed(2011)
for (i in 2:6)
{
for (j in 2:6)
{
A <- optimumLHS(i, j, 5)
expect_true(checkLatinHypercube(A))
}
}
set.seed(2012)
for (i in 2:6)
{
for (j in 2:6)
{
A <- optimumLHS(i, j, 5, 0.05)
expect_true(checkLatinHypercube(A))
}
}
expect_error(.Call("optimumLHS_cpp", 3, 4L, 4L, 0.01, FALSE))
X <- .Call("optimumLHS_cpp", 1L, 4L, 4L, 0.01, FALSE)
expect_equal(nrow(X), 1)
A <- optimumLHS(1, 4)
expect_equal(nrow(A), 1)
expect_true(checkLatinHypercube(A))
})
lhs/tests/testthat/test-geneticlhs.R
# Copyright 2019 Robert Carnell
context("test-geneticlhs")
test_that("geneticLHS works", {
expect_error(geneticLHS(-1, 2))
expect_error(geneticLHS(10, -30))
expect_error(geneticLHS(10, 2, -2))
expect_error(geneticLHS(NA, 2))
expect_error(geneticLHS(NaN, 2))
expect_warning(expect_error(geneticLHS(Inf, 2)))
expect_error(geneticLHS(10, NA))
expect_error(geneticLHS(10, NaN))
expect_warning(expect_error(geneticLHS(10, Inf)))
expect_error(geneticLHS(10, 2, NA))
expect_error(geneticLHS(10, 2, NaN))
expect_warning(expect_error(geneticLHS(10, 2, Inf)))
set.seed(1976)
expect_true(checkLatinHypercube(geneticLHS(4, 2)))
set.seed(1977)
expect_true(checkLatinHypercube(geneticLHS(3, 3, 6)))
expect_error(geneticLHS(10, 2, 4, -1))
expect_error(geneticLHS(10, 2, 4, 4, -.1))
expect_error(geneticLHS(10, 2, 4, 4, 1.1))
expect_error(geneticLHS(10, 2, 2, NA))
expect_error(geneticLHS(10, 2, 2, NaN))
expect_warning(expect_error(geneticLHS(10, 2, 2, Inf)))
#expect_error(geneticLHS(10, 2, 2, 4, NA))
#expect_error(geneticLHS(10, 2, 2, 4, NaN))
expect_error(geneticLHS(10, 2, 2, 4, Inf))
set.seed(1976)
expect_true(checkLatinHypercube(geneticLHS(20, 5, pop = 100, gen = 4,
pMut = 0.2, criterium = "S")))
capture_output(X <- .Call("geneticLHS_cpp", 1L, 4L, 10L, 4L, 0.01, "S", TRUE))
expect_equal(nrow(X), 1)
A <- geneticLHS(1, 4)
expect_equal(nrow(A), 1)
expect_true(checkLatinHypercube(A))
})
lhs/tests/testthat/test-quantile_transforms.R
# Copyright 2024 Robert Carnell
context("test-quantile_transformations")
test_that("qfactor works", {
p <- randomLHS(n=5, k=1)
fact <- factor(LETTERS[1:4])
res <- qfactor(p, fact)
expect_true(all(levels(res) %in% levels(fact)))
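# the next check encodes the expected mapping: a probability p is assigned level j
# when floor(p * nlevels(fact)) + 1 == j, i.e. when p falls in [(j-1)/nlevels, j/nlevels)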
expect_true(all(fact[floor(p[,1]*nlevels(fact)) + 1] == res))
p <- randomLHS(n=5, k=1)
fact <- factor(LETTERS[1:4], levels = LETTERS[4:1], ordered = TRUE)
res <- qfactor(p, fact)
expect_true(all(levels(res) %in% levels(fact)))
expect_true(all(levels(fact)[floor(p[,1]*nlevels(fact)) + 1] == as.character(res)))
p <- randomLHS(n=25, k=1)
fact <- factor(LETTERS[1:5])
res <- qfactor(p, fact)
expect_true(all(levels(res) %in% levels(fact)))
expect_true(all(fact[floor(p[,1]*nlevels(fact)) + 1] == res))
expect_equivalent(rep(5, 5), c(table(res)))
p <- randomLHS(n=25, k=1)
fact <- ordered(LETTERS[1:5])
res <- qfactor(p, fact)
expect_true(all(levels(res) %in% levels(fact)))
expect_true(all(levels(fact)[floor(p[,1]*nlevels(fact)) + 1] == as.character(res)))
expect_equivalent(rep(5, 5), c(table(res)))
expect_error(qfactor("a", factor("a")))
expect_error(qfactor(c(0.1, 0.2), "a"))
expect_error(qfactor(1.1, factor("a")))
expect_error(qfactor(-3, factor("a")))
})
test_that("qinteger works", {
p <- randomLHS(n = 25, k = 1)
res <- qinteger(p, 6, 12)
expect_equal(6, min(res))
expect_equal(12, max(res))
expect_true(all(res %in% 6:12))
p <- randomLHS(n = 25, k = 1)
res <- qinteger(p, -4L, 2L)
expect_equal(-4, min(res))
expect_equal(2, max(res))
expect_true(all(res %in% -4:2))
expect_error(qinteger("a", 1, 5))
expect_error(qinteger(c(0.1, 0.2), 1.1, 5))
expect_error(qinteger(c(0.1, 0.2), 1, 5.2))
expect_error(qinteger(c(0.1, 0.2), 8, 5))
expect_error(qinteger(1.1, factor("a")))
expect_error(qinteger(-3, factor("a")))
})
test_that("qdirichlet works", {
set.seed(19753)
X <- randomLHS(500, 5)
Y <- X
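# qdirichlet turns the first three uniform columns into a Dirichlet(2, 2, 2) sample,
# so each row of Y[,1:3] sums to 1 (verified below)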
Y[,1:3] <- qdirichlet(X[,1:3], rep(2,3))
Y[,4] <- qnorm(X[,4], 2, 1)
Y[,5] <- qunif(X[,5], 1, 3)
expect_equal(rep(1,500), rowSums(Y[,1:3]))
expect_error(qdirichlet(X[,1:3], rep(2, 7)))
expect_error(qdirichlet(X[,1:3], c(1, NA, 7)))
})
lhs/tests/testthat/test-create_oalhs.R
# Copyright 2019 Robert Carnell
context("test-create_oalhs")
test_that("create_oalhs works", {
oalhs <- create_oalhs(9, 4, TRUE, FALSE)
expect_true(checkLatinHypercube(oalhs))
expect_equal(nrow(oalhs), 9)
expect_equal(ncol(oalhs), 4)
# ask for an achievable design
oalhs <- create_oalhs(4, 2, TRUE, FALSE)
expect_true(checkLatinHypercube(oalhs))
expect_equal(nrow(oalhs), 4)
expect_equal(ncol(oalhs), 2)
# ask for a design that needs more rows
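# (the TRUE third argument lets the design grow to the next achievable
# orthogonal-array size, here 25 = 5^2 runs)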
oalhs <- create_oalhs(20, 3, TRUE, FALSE)
expect_true(checkLatinHypercube(oalhs))
expect_equal(nrow(oalhs), 25)
expect_equal(ncol(oalhs), 3)
# ask for a design, allowing fewer rows than requested
oalhs <- create_oalhs(20, 3, FALSE, FALSE)
expect_true(checkLatinHypercube(oalhs))
expect_equal(nrow(oalhs), 18)
expect_equal(ncol(oalhs), 3)
oalhs <- create_oalhs(20, 10, TRUE, FALSE)
expect_true(checkLatinHypercube(oalhs))
expect_equal(nrow(oalhs), 54)
expect_equal(ncol(oalhs), 10)
# check repeatability
set.seed(1001)
X <- create_oalhs(9, 4, TRUE, FALSE)
set.seed(1001)
Y <- create_oalhs(9, 4, TRUE, FALSE)
expect_true(all(X == Y))
expect_error(.Call("create_oalhs", 3, 4L, FALSE, FALSE))
expect_error(.Call("create_oalhs", 3L, 4L, 5, FALSE))
expect_error(.Call("create_oalhs", 3L, as.integer(NA), FALSE, FALSE))
})
lhs/tests/testthat/test-galois_field.R
# Copyright 2024 Robert Carnell
context("test-galois_field")
test_that("Galois Fields Work", {
test_field <- function(q, p, n)
{
gf <- create_galois_field(q)
expect_equal(gf$p, p)
expect_equal(gf$q, q)
expect_equal(gf$n, n)
}
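# q = p^n: GF(q) is represented by polynomials of degree < n over the prime field GF(p),
# e.g. GF(4) = GF(2^2) and GF(9) = GF(3^2) in the checks below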
test_field(2, 2, 1)
test_field(3, 3, 1)
test_field(4, 2, 2)
test_field(9, 3, 2)
# q cannot be less than 2
expect_error(create_galois_field(-1))
expect_error(create_galois_field(1))
# q must be a prime power
expect_error(create_galois_field(100000000))
expect_error(create_galois_field(35))
# the prime power can't be too large
expect_error(create_galois_field(2^30))
})
test_that("Associative", {
check_associative <- function(gf){
for (i in 1:gf$q)
{
for (j in 1:gf$q)
{
for (k in 1:gf$q)
{
sum1 <- poly_sum(gf$p, gf$n, gf$poly[i,], gf$poly[j,])
sum2 <- poly_sum(gf$p, gf$n, sum1, gf$poly[k,])
sum3 <- poly_sum(gf$p, gf$n, gf$poly[j,], gf$poly[k,])
sum4 <- poly_sum(gf$p, gf$n, gf$poly[i,], sum3)
expect_equal(sum2, sum4)
prod1 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], gf$poly[j,])
prod2 <- poly_prod(gf$p, gf$n, gf$xton, prod1, gf$poly[k,])
prod3 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[j,], gf$poly[k,])
prod4 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], prod3)
expect_equal(prod2, prod4)
}
}
}
}
check_associative(create_galois_field(3))
check_associative(create_galois_field(4))
check_associative(create_galois_field(9))
check_associative(create_galois_field(8))
})
test_that("Commutative", {
check_commutative <- function(gf)
{
for (i in 1:gf$q)
{
for (j in 1:gf$q)
{
sum1 <- poly_sum(gf$p, gf$n, gf$poly[i,], gf$poly[j,])
sum2 <- poly_sum(gf$p, gf$n, gf$poly[j,], gf$poly[i,])
expect_equal(sum1, sum2)
prod1 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], gf$poly[j,])
prod2 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[j,], gf$poly[i,])
expect_equal(prod1, prod2)
}
}
}
check_commutative(create_galois_field(3))
check_commutative(create_galois_field(4))
check_commutative(create_galois_field(9))
check_commutative(create_galois_field(8))
})
test_that("Identity", {
check_identity <- function(gf, zero, one)
{
for (i in 1:gf$q)
{
sum1 <- poly_sum(gf$p, gf$n, gf$poly[i,], zero)
expect_equal(sum1, gf$poly[i,])
prod1 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], one)
expect_equal(prod1, gf$poly[i,])
}
}
check_identity(create_galois_field(3), 0, 1)
check_identity(create_galois_field(4), c(0,0), c(1,0))
check_identity(create_galois_field(9), c(0,0), c(1,0))
check_identity(create_galois_field(8), c(0,0,0), c(1,0,0))
})
test_that("Inverse", {
check_inverse <- function(gf, zero, one)
{
#gf <- create_galois_field(3)
for (i in 1:gf$q)
{
#i <- 1
sum1 <- poly_sum(gf$p, gf$n, gf$poly[i,], gf$poly[gf$neg[i] + 1,])
expect_equal(sum1, zero)
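# gf$inv is NA for the additive identity (zero has no multiplicative inverse), so that case is skipped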
if (!is.na(gf$inv[i]))
{
prod1 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], gf$poly[gf$inv[i] + 1,])
expect_equal(prod1, one)
}
}
}
check_inverse(create_galois_field(3), 0, 1)
check_inverse(create_galois_field(4), c(0,0), c(1,0))
check_inverse(create_galois_field(9), c(0,0), c(1,0))
check_inverse(create_galois_field(8), c(0,0,0), c(1,0,0))
})
test_that("Distributive", {
check_distributive <- function(gf)
{
for (i in 1:gf$q)
{
for (j in 1:gf$q)
{
for (k in 1:gf$q)
{
sum1 <- poly_sum(gf$p, gf$n, gf$poly[j,], gf$poly[k,])
prod1 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], sum1)
prod2 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], gf$poly[j,])
prod3 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[i,], gf$poly[k,])
sum2 <- poly_sum(gf$p, gf$n, prod2, prod3)
expect_equal(prod1, sum2)
}
}
}
}
check_distributive(create_galois_field(3))
check_distributive(create_galois_field(4))
check_distributive(create_galois_field(9))
check_distributive(create_galois_field(8))
})
test_that("Roots", {
check_roots <- function(gf)
{
#gf <- create_galois_field(3)
for (i in 1:gf$q)
{
if (!is.na(gf$root[i])){
prod1 <- poly_prod(gf$p, gf$n, gf$xton, gf$poly[gf$root[i] + 1,], gf$poly[gf$root[i] + 1,])
expect_equal(gf$poly[i, ], prod1)
}
}
}
check_roots(create_galois_field(3))
check_roots(create_galois_field(4))
check_roots(create_galois_field(9))
check_roots(create_galois_field(8))
})
test_that("poly_prod", {
gf <- create_galois_field(4)
# poly_prod(gf$p, gf$n, gf$xton, gf$poly[1,], gf$poly[2,])
# n != length(xton)
expect_error(poly_prod(gf$p, gf$n, gf$xton[1], gf$poly[1,], gf$poly[2,]))
# n != length(p1)
expect_error(poly_prod(gf$p, gf$n, gf$xton, gf$poly[1,1], gf$poly[2,]))
# n != length(p2)
expect_error(poly_prod(gf$p, gf$n, gf$xton, gf$poly[1,], gf$poly[2,1]))
# entries of polynomials > p
expect_error(poly_prod(gf$p, gf$n, gf$xton, c(7,7), gf$poly[2,]))
# entries of polynomials < 0
expect_error(poly_prod(gf$p, gf$n, gf$xton, c(-7,-7), gf$poly[2,]))
})
test_that("poly_sum", {
gf <- create_galois_field(4)
# n != length(p1)
expect_error(poly_sum(gf$p, gf$n, gf$poly[1,1], gf$poly[2,]))
# n != length(p2)
expect_error(poly_sum(gf$p, gf$n, gf$poly[1,], gf$poly[2,1]))
# entries of polynomials > p
expect_error(poly_sum(gf$p, gf$n, c(7,7), gf$poly[2,]))
# entries of polynomials < 0
expect_error(poly_sum(gf$p, gf$n, c(-7,-7), gf$poly[2,]))
})
test_that("poly2int", {
gf <- create_galois_field(4)
# n != length(poly)
expect_error(poly2int(gf$p, gf$n, gf$poly[1,1]))
# entries of polynomials > p
expect_error(poly2int(gf$p, gf$n, c(7,7)))
# entries of polynomials < 0
expect_error(poly2int(gf$p, gf$n, c(-7,-7)))
})
lhs/tests/testthat/test-improvedlhs.r
# Copyright 2019 Robert Carnell
context("test-improvedlhs")
test_that("improvedLHS works", {
expect_error(improvedLHS(-1, 2))
expect_error(improvedLHS(10, -30))
expect_error(improvedLHS(10, 2, -2))
expect_error(improvedLHS(NA, 2))
expect_error(improvedLHS(NaN, 2))
expect_warning(expect_error(improvedLHS(Inf, 2)))
expect_error(improvedLHS(10, NA, 2))
expect_error(improvedLHS(10, NaN, 2))
expect_warning(expect_error(improvedLHS(10, Inf, 2)))
expect_error(improvedLHS(10, 2, NA))
expect_error(improvedLHS(10, 2, NaN))
expect_warning(expect_error(improvedLHS(10, 2, Inf)))
set.seed(1976)
expect_true(checkLatinHypercube(improvedLHS(4, 2)))
set.seed(1977)
expect_true(checkLatinHypercube(improvedLHS(3, 3, 5)))
set.seed(1111)
A <- improvedLHS(20, 6)
set.seed(1111)
B <- improvedLHS(20, 6)
expect_true(all(A == B))
D <- improvedLHS(20, 6)
expect_true(any(A != D))
A <- improvedLHS(1, 4)
expect_equal(nrow(A), 1)
expect_true(checkLatinHypercube(A))
})
test_that("improvedLHS errors work", {
expect_error(.Call("improvedLHS_cpp", 3, 4L, 4L))
X <- .Call("improvedLHS_cpp", 1L, 4L, 4L)
expect_equal(nrow(X), 1)
})
lhs/tests/testthat/test-maximinlhs.R
# Copyright 2019 Robert Carnell
context("test-maximinlhs")
test_that("maximinLHS works", {
expect_error(maximinLHS(-1, 2))
expect_error(maximinLHS(10, -30))
expect_error(maximinLHS(10, 2, dup = -2))
expect_error(maximinLHS(NA, 2))
expect_error(maximinLHS(NaN, 2))
expect_warning(expect_error(maximinLHS(Inf, 2)))
expect_error(maximinLHS(10, NA))
expect_error(maximinLHS(10, NaN))
expect_warning(expect_error(maximinLHS(10, Inf)))
expect_error(maximinLHS(10, 2, dup = NA))
expect_error(maximinLHS(10, 2, dup = NaN))
expect_warning(expect_error(maximinLHS(10, 2, dup = Inf)))
set.seed(1976)
expect_true(checkLatinHypercube(maximinLHS(4, 2)))
set.seed(1977)
expect_true(checkLatinHypercube(maximinLHS(3, 3, dup = 5)))
expect_error(maximinLHS(10, 4, method = "none"))
expect_error(maximinLHS(10, 4, method = "build", optimize.on = "none"))
expect_warning(maximinLHS(10, 4, method = "build", optimize.on = "result"))
expect_error(maximinLHS(10, c(4,5), method = "iterative"))
expect_error(maximinLHS(10, NA, method = "iterative"))
expect_error(maximinLHS(10, Inf, method = "iterative"))
expect_error(maximinLHS(12.2, 4, method = "iterative"))
expect_error(maximinLHS(12, 4.3, method = "iterative"))
expect_error(maximinLHS(12, 4, dup = 10.2, method = "iterative"))
A <- maximinLHS(12, 4, dup = 10, method = "iterative", optimize.on = "result")
expect_true(checkLatinHypercube(A))
A <- maximinLHS(20, 5, dup = 3, method = "iterative", optimize.on = "grid")
expect_true(checkLatinHypercube(A))
A <- maximinLHS(1, 4)
expect_equal(nrow(A), 1)
expect_true(checkLatinHypercube(A))
})
test_that("maximinLHS works with expanded capability", {
expect_true(checkLatinHypercube(maximinLHS(4, 3, method = "build", dup = 2)))
expect_true(checkLatinHypercube(maximinLHS(4, 3, method = "iterative", eps = 0.05, maxIter = 100, optimize.on = "grid")))
expect_true(checkLatinHypercube(maximinLHS(4, 3, method = "iterative", eps = 0.05, maxIter = 100, optimize.on = "result")))
})
test_that("maximinLHS debug capability for code coverage", {
capture_output(X <- maximinLHS(10, 4, method = "build",
optimize.on = "grid", debug = TRUE))
expect_equal(nrow(X), 10)
expect_warning(capture_output(X <- maximinLHS(10, 4, method = "build",
optimize.on = "result", debug = TRUE)))
expect_equal(nrow(X), 10)
capture_output(X <- maximinLHS(10, 10, method = "iterative",
optimize.on = "result", eps = 1E-9, debug = TRUE))
expect_equal(nrow(X), 10)
capture_output(X <- maximinLHS(5, 5, method = "iterative",
optimize.on = "result", eps = 1, debug = TRUE))
expect_error(.Call("maximinLHS_cpp", 3, 4L, 4L))
X <- .Call("maximinLHS_cpp", 1L, 4L, 4L)
expect_equal(nrow(X), 1)
})
lhs/tests/testthat/test-createoa.R
# Copyright 2019 Robert Carnell
context("test-createoa")
test_that("createBose works", {
B <- createBose(2, 3, FALSE)
expect_true(checkOA(encodeOA(B, 2L)))
B <- createBose(3, 4, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createBose(3, 3)
expect_equal(nrow(B), 3^2)
expect_equal(ncol(B), 3)
B <- createBose(3, 4)
expect_equal(nrow(B), 3^2)
expect_equal(ncol(B), 4)
B <- createBose(5, 4)
expect_equal(nrow(B), 5^2)
expect_equal(ncol(B), 4)
# Error: q, ncol, and n should be integers
expect_error(.Call("oa_type1", "bose", 3, 3L, FALSE))
# Error: type should be a character and bRandom should be a logical
expect_error(.Call("oa_type1", 0, 3L, 3L, FALSE))
# Error: q, ncol, and bRandom can only be of length 1
expect_error(.Call("oa_type1", "bose", c(3L, 4L), 3L, FALSE))
# Error: q, ncol, and bRandom are not permitted to be NA
expect_error(.Call("oa_type1", "bose", as.integer(NA), 3L, FALSE))
# Error: bob is an Unrecognized orthogonal array algorithm
expect_error(.Call("oa_type1", "bob", 3L, 3L, FALSE))
})
test_that("createBoseBush works", {
B <- createBoseBush(2, 4, FALSE)
expect_true(checkOA(encodeOA(B, 2L)))
B <- createBoseBush(4, 8, FALSE)
expect_true(checkOA(encodeOA(B, 4L)))
B <- createBoseBush(4, 5)
expect_equal(nrow(B), 2*4^2)
expect_equal(ncol(B), 5)
B <- createBoseBush(4, 4)
expect_equal(nrow(B), 2*4^2)
expect_equal(ncol(B), 4)
B <- createBoseBush(8, 3)
expect_equal(nrow(B), 2*8^2)
expect_equal(ncol(B), 3)
# Warning message:
# In createBoseBush(8, 17) :
# Warning: The Bose-Bush construction with ncol = 2q+1
# has a defect. While it is still an OA(2q^2,2q+1,q,2),
# there exist some pairs of rows that agree in three columns.
expect_warning({
B <- createBoseBush(8, 17)
})
expect_equal(nrow(B), 2*8^2)
expect_equal(ncol(B), 17)
expect_true(checkOA(encodeOA(B, 8L)))
expect_error({
B <- createBoseBush(8, 18)
})
})
test_that("createBush works", {
B <- createBush(3, 3, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createBush(4, 5, FALSE)
expect_true(checkOA(encodeOA(B, 4L)))
B <- createBush(3, 3)
expect_equal(nrow(B), 3^3)
expect_equal(ncol(B), 3)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createBush(3, 4)
expect_equal(nrow(B), 3^3)
expect_equal(ncol(B), 4)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createBush(5, 4)
expect_equal(nrow(B), 5^3)
expect_equal(ncol(B), 4)
expect_true(checkOA(encodeOA(B, 5L)))
expect_warning({
B <- createBush(2, 3)
})
expect_equal(nrow(B), 2^3)
expect_equal(ncol(B), 3)
expect_true(checkOA(encodeOA(B, 2L)))
expect_error({
B <- createBush(2, 4)
})
})
test_that("createAddelKemp works", {
B <- createAddelKemp(2, 4, FALSE)
expect_true(checkOA(encodeOA(B, 2L)))
B <- createAddelKemp(3, 6, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createAddelKemp(4, 5)
expect_equal(nrow(B), 2*4^2)
expect_equal(ncol(B), 5)
B <- createAddelKemp(4, 4)
expect_equal(nrow(B), 2*4^2)
expect_equal(ncol(B), 4)
B <- createAddelKemp(5, 3)
expect_equal(nrow(B), 2*5^2)
expect_equal(ncol(B), 3)
expect_warning({
B <- createAddelKemp(q = 3^1, ncol = 2*3 + 1, bRandom = FALSE)
}, regexp = "Warning:")
expect_true(checkOA(encodeOA(B, 3L)))
expect_equal(7, ncol(B))
expect_equal(18, nrow(B))
})
test_that("createAddelKemp3 works", {
B <- createAddelKemp3(2, 13, FALSE)
expect_true(checkOA(encodeOA(B, 2L)))
B <- createAddelKemp3(3, 25, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createAddelKemp3(4, 5)
expect_equal(nrow(B), 2*4^3)
expect_equal(ncol(B), 5)
B <- createAddelKemp3(4, 4)
expect_equal(nrow(B), 2*4^3)
expect_equal(ncol(B), 4)
B <- createAddelKemp3(5, 3)
expect_equal(nrow(B), 2*5^3)
expect_equal(ncol(B), 3)
})
test_that("createBusht works", {
B <- createBusht(3, 4, 2, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createBusht(3, 4, 3, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
expect_error(.Call("oa_type2", "busht", 3, 4L, 3L, FALSE))
expect_error(.Call("oa_type2", 0, 3L, 4L, 3L, FALSE))
expect_error(.Call("oa_type2", "busht", c(3L, 4L), 4L, 3L, FALSE))
expect_error(.Call("oa_type2", "busht", as.integer(NA), 4L, 3L, FALSE))
expect_error(.Call("oa_type2", "bosebushl", as.integer(NA), 4L, 3L, FALSE))
expect_error(.Call("oa_type2", "addelkempn", as.integer(NA), 4L, 3L, FALSE))
expect_error(.Call("oa_type2", "bob", as.integer(NA), 4L, 3L, FALSE))
expect_error(.Call("oa_type2", "bob", 3L, 4L, 3L, FALSE))
X <- .Call("oa_type2", "busht", 3L, 4L, 3L, TRUE)
expect_equal(nrow(X), 64)
})
test_that("createBoseBushl works", {
B <- createBoseBushl(3, 5, 3, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createBoseBushl(4, 4, 16, FALSE)
expect_true(checkOA(encodeOA(B, 4L)))
B <- createBoseBushl(q = 2^2, ncol = 2*2^2, lambda = 2, bRandom = FALSE)
expect_true(checkOA(encodeOA(B, 4L)))
expect_warning({
B <- createBoseBushl(q = 2^2, ncol = 2*2^2 + 1, lambda = 2, bRandom = FALSE)
}, regexp = "Warning:")
expect_true(checkOA(encodeOA(B, 4L)))
expect_equal(9, ncol(B))
expect_equal(32, nrow(B))
expect_warning({
B <- createBoseBushl(q = 3^1, ncol = 3*3 + 1, lambda = 3, bRandom = FALSE)
}, regexp = "Warning:")
expect_true(checkOA(encodeOA(B, 3L)))
expect_equal(10, ncol(B))
expect_equal(27, nrow(B))
})
test_that("createAddelKempN works", {
B <- createAddelKempN(2, 3, 3, FALSE)
expect_true(checkOA(encodeOA(B, 2L)))
B <- createAddelKempN(3, 4, 4, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
# These two tests were failing prior to lhs 1.1
B <- createAddelKempN(3, 5, 3, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
B <- createAddelKempN(3, 25, 3, FALSE)
expect_true(checkOA(encodeOA(B, 3L)))
})
lhs/tests/testthat/test-get_library_versions.R
# Copyright 2024 Robert Carnell
context("test-get_library_versions")
test_that("get_library_versions", {
a <- get_library_versions()
expect_true(is.character(a))
expect_equal(1, length(a))
expect_true(nchar(a) > 10)
})
lhs/tests/testthat/test-optaugmentlhs.R
# Copyright 2019 Robert Carnell
context("test-optaugmentlhs")
test_that("optAugmentLHS works", {
expect_error(optAugmentLHS(randomLHS(10, 4), NA))
expect_error(optAugmentLHS(randomLHS(10, 4), NaN))
expect_error(optAugmentLHS(randomLHS(10, 4), Inf))
expect_error(optAugmentLHS(randomLHS(10, 4), 2, NA))
expect_error(optAugmentLHS(randomLHS(10, 4), 2, NaN))
expect_error(optAugmentLHS(randomLHS(10, 4), 2, Inf))
temp <- randomLHS(10, 4)
temp[1,1] <- NA
expect_error(optAugmentLHS(temp, 5))
temp <- randomLHS(10, 4)
temp[1,1] <- 2
expect_error(optAugmentLHS(temp, 5))
set.seed(1976)
expect_true(checkLatinHypercube(optAugmentLHS(randomLHS(4, 2), 2)))
set.seed(1977)
expect_true(checkLatinHypercube(optAugmentLHS(randomLHS(3, 3), 3, 3)))
expect_error(optAugmentLHS(c(1,2), m = 4, mult = 2))
expect_error(optAugmentLHS(randomLHS(10, 4), c(1,2)))
expect_error(optAugmentLHS(randomLHS(10, 4), -2))
A <- optAugmentLHS(randomLHS(1,4), 1)
expect_true(checkLatinHypercube(A))
})
lhs/tests/testthat/test-augmentlhs.R
# Copyright 2019 Robert Carnell
context("test-augmentlhs")
test_that("augment works", {
expect_error(augmentLHS(randomLHS(10, 4), NA))
expect_error(augmentLHS(randomLHS(10, 4), NaN))
expect_error(augmentLHS(randomLHS(10, 4), Inf))
set.seed(1976)
temp <- randomLHS(10, 4)
temp[1,1] <- NA
expect_error(augmentLHS(temp, 5))
set.seed(1976)
temp <- randomLHS(10, 4)
temp[1,1] <- 2
expect_error(augmentLHS(temp, 5))
set.seed(1976)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(4, 2), 4)))
set.seed(1977)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(3, 3), 3)))
set.seed(1977)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(4, 1), 2)))
# this test addresses a bug where an error occurred when adding 1 row in
# augmentLHS
temp <- randomLHS(7, 2)
temp <- augmentLHS(temp, 1)
expect_equal(nrow(temp), 8)
expect_true(checkLatinHypercube(augmentLHS(randomLHS(7, 2), 7)))
expect_true(checkLatinHypercube(augmentLHS(randomLHS(10, 5), 10)))
# test exceptions
expect_error(augmentLHS(c(1,2), 5))
expect_error(augmentLHS(randomLHS(10,3), c(5,9)))
expect_error(augmentLHS(randomLHS(10,3), -1))
expect_error(augmentLHS(randomLHS(10,3), 2.2))
A <- augmentLHS(randomLHS(1,4), 1)
expect_true(checkLatinHypercube(A))
})
lhs/tests/testthat/test-oa_to_oalhs.R
# Copyright 2019 Robert Carnell
context("test-oa_to_oalhs")
test_that("oa_to_oalhs works with internal oa generation", {
oa <- createBose(3, 4, TRUE)
oalhs <- oa_to_oalhs(9, 4, oa)
expect_true(checkLatinHypercube(oalhs))
oa <- createBose(3, 4, FALSE)
oalhs <- oa_to_oalhs(9, 4, oa)
expect_true(checkLatinHypercube(oalhs))
oa <- createBoseBush(8, 5, TRUE)
oalhs <- oa_to_oalhs(128, 5, oa)
expect_true(checkLatinHypercube(oalhs))
# check a mismatch in n causes an error
expect_error(oa_to_oalhs(3, 5, oa))
# check a mismatch in k causes an error
expect_error(oa_to_oalhs(128, 8, oa))
# check a wrong sized oa causes an error
expect_error(oa_to_oalhs(128, 5, oa[1:100,1:3]))
# check wrong type
expect_error(oa_to_oalhs(3, 5, matrix(1.2, nrow = 3, ncol = 5)))
oa <- createAddelKemp(3, 4, FALSE)
oalhs <- oa_to_oalhs(18, 4, oa)
expect_true(checkLatinHypercube(oalhs))
oa <- createAddelKemp3(4, 20, TRUE)
oalhs <- oa_to_oalhs(128, 20, oa)
expect_true(checkLatinHypercube(oalhs))
expect_error(.Call("oa_to_lhs", 4, 20L, oa, FALSE))
expect_error(.Call("oa_to_lhs", 4L, 20L, oa, 5))
expect_error(.Call("oa_to_lhs", as.integer(NA), 20L, oa, FALSE))
})
test_that("oa_to_oalhs works with DoE.base", {
# note: trying to ensure that a lack of DoE.base does not break the tests
# also trying to avoid attaching the DoE.base package because it causes warnings in the test suite
testthat::skip_if_not_installed("DoE.base")
# 12 rows, two columns of 1,2 and one column of 1:6
my_oa <- DoE.base::oa.design(ID = DoE.base::L12.2.2.6.1)
oalhs <- oa_to_oalhs(12, 3, my_oa)
expect_true(checkLatinHypercube(oalhs))
# 20 rows, 19 columns of 1,2
my_oa <- DoE.base::oa.design(ID = DoE.base::L20.2.19)
oalhs <- oa_to_oalhs(20, 19, my_oa)
expect_true(checkLatinHypercube(oalhs))
# can I get the oa back?
#c(ifelse(floor(oalhs*20) < 20/2, 1, 2)) == as.integer(as.matrix(my_oa))
# can I verify that the oalhs is an oa?
temp <- t(ifelse(floor(oalhs*20) < 20/2, -1, 1)) %*% ifelse(floor(oalhs*20) < 20/2, -1, 1)
expect_true(all(temp[upper.tri(temp)] == 0))
# 9 rows, 4 columns of 1,2,3
my_oa <- DoE.base::oa.design(ID = DoE.base::L9.3.4)
oalhs <- oa_to_oalhs(9, 4, my_oa)
expect_true(checkLatinHypercube(oalhs))
# can I verify that the oalhs is an oa?
temp1 <- ifelse(floor(oalhs*9) < 9/3, -1, ifelse(floor(oalhs*9) < 2*9/3, 0, 1))
temp <- t(temp1) %*% temp1
expect_true(all(temp[upper.tri(temp)] == 0))
})
test_that("Edge cases", {
A <- matrix(1L, nrow = 1, ncol = 4)
B <- oa_to_oalhs(1, 4, A)
expect_equal(nrow(B), 1)
expect_true(checkLatinHypercube(B))
})
lhs/tests/testthat.R
if (require(testthat, quietly = TRUE))
{
library(lhs)
test_check("lhs")
} else {
cat("\ntestthat not available for testing\n")
}
cat("\n\n")
lhs/MD5
f85d7ad91e5678df6269ab3fe3cd2fb5 *ChangeLog
62af58443b1ed159fd4dbf3b8a0cc8d0 *DESCRIPTION
4ed030018f031be62f46acbce7374459 *NAMESPACE
0678a6de0708199f921c87acbdfced58 *NEWS
53866b3921acf88d621f1b67d6bf5517 *R/augmentLHS.R
89492c484e87dcaa511bcd82bcafd870 *R/correlatedLHS.R
aa739eede2d19e93c67c4db832efc1c5 *R/createOA.R
e0aa8aefc35430288bf424573158dcfb *R/create_oalhs.R
b13a12e4abeb22949dee39121869939d *R/galois_field.R
ee054a9159cb67016025e58156c55044 *R/geneticLHS.R
82ff862f9d061d2f46603f15b34ef69c *R/get_library_versions.R
95a9a84bc9dbbef05bef322257008d78 *R/improvedLHS.r
b9c800f8a568155f058dbf1f23732533 *R/lhs.R
fdb66b50d5d88b352e6989aa64d8d830 *R/maximinLHS.R
365d59c706b63480a2cf5e090425d5bc *R/oa_to_oalhs.R
6ba4bc2876c5d5f1b2ae33186299aa6f *R/optAugmentLHS.R
d3569b956d2f4c5e30ac0e9284416c5e *R/optSeededLHS.R
b6508e5d04a13558e19c6534c768e2a9 *R/optimumLHS.R
601e0117b05e982b7c9a878f244e18e0 *R/quantile_transforms.R
28fe6e3db4a011dd782118c1574ceb9d *R/randomLHS.r
6f6a5d5f3f2e20c0fe869bfe603e0185 *R/runifint.r
c77ba8ff0a839cf9188ee61c77d37f1a *build/vignette.rds
c40fe474107a72a524c98cd5d077c125 *inst/doc/augment_lhs.R
2ea4584a4f8f5710e9e845e365d054ea *inst/doc/augment_lhs.Rmd
cb576800238834f4f34527a2af27b28e *inst/doc/augment_lhs.html
ce027d1b820eebc79f3fc3a62ca6442b *inst/doc/correlated_lhs.R
c5932d5dece75c9a75a7af99bf83368f *inst/doc/correlated_lhs.Rmd
4c66ff03f6796043307ca3bcefe06a9a *inst/doc/correlated_lhs.html
c3dd307ccf782db9eb75f1404cf37fa1 *inst/doc/lhs_basics.R
c48d7934b6c01da2179998d83c78ac05 *inst/doc/lhs_basics.Rmd
4b763a562f6a9baba6e945f8d2f0b002 *inst/doc/lhs_basics.html
96e9e0d002874232f9458386c38d85c3 *inst/doc/lhs_faq.R
c08df90f722ffd24fa2d4df16d2dee50 *inst/doc/lhs_faq.Rmd
5944cbfd5506f72acf172c1847e4ab16 *inst/doc/lhs_faq.html
385a89b5cd29ef880745c295821f72fa *man/augmentLHS.Rd
33a7e19bd547dccf1f27548b8f6f6062 *man/correlatedLHS.Rd
7b6968145fa99250e5417be9b22edf0d *man/createAddelKemp.Rd
4806482382403d66d025c050c04bb127 *man/createAddelKemp3.Rd
9c64475ba6ca348a2c5f8c36822a366b *man/createAddelKempN.Rd
846b4f87068e63a15c433b9564fa90fa *man/createBose.Rd
f72d6ddbfcefb07ae2ba08a46a91e846 *man/createBoseBush.Rd
e125dc387711a92a0c03fc13bb708173 *man/createBoseBushl.Rd
58c37e6cc6b78f73f9bca4f9e9cd4841 *man/createBush.Rd
40113363ae1df9a0296d2884e12e216e *man/createBusht.Rd
38387d12556c313404d634691e62dbd8 *man/create_galois_field.Rd
6099e0cdab76518961760e5b7ba376bb *man/create_oalhs.Rd
d6530dd864441ec8150f21105202585f *man/geneticLHS.Rd
cd434d0087818e7a7181d53b24d60280 *man/get_library_versions.Rd
4f9df18a7f626c11a539d4d7f4c86318 *man/improvedLHS.Rd
8d4d7796995d4ea88ef685b62c1eb733 *man/lhs-package.Rd
3cd0d73f8afe276b3a87095de0a4d2a2 *man/maximinLHS.Rd
602d1ee855b5e630920fb4f2c986ba33 *man/oa_to_oalhs.Rd
66532d58bedf1bb20bb8d3438b467266 *man/optAugmentLHS.Rd
5627fd6f80109463dea8108ad4973f1d *man/optSeededLHS.Rd
7fa0b5b23335f15cff50eebe4a2a8734 *man/optimumLHS.Rd
647d3e752fefe10507ee4451b00bea62 *man/poly2int.Rd
821b9245eb2944e0804502895ef4ca62 *man/poly_prod.Rd
61ee05edb765e3ab2cb4e01583300f4c *man/poly_sum.Rd
84d5d237aabc7e3793aa30ccffc0afa3 *man/quanttrans.Rd
a159b822a2135b86a0f29ab62fbb8a74 *man/randomLHS.Rd
17181180f6a19291e342196042bf6e3f *man/runifint.Rd
f649001939ea771545339996d0bf614f *src/COrthogonalArray.cpp
62ba6f3afa35e087b202baf35923fee1 *src/COrthogonalArray.h
0cc862de831d9d7cfc565914f131cd9b *src/CRandom.h
66ad54cb00e04bda7bd043c888fb8094 *src/GaloisField.cpp
92dc5772d370e69822ec1db2035eb6cc *src/GaloisField.h
37769f1ff1637a4cb937ae9720561c2c *src/LHSCommonDefines.h
94f8191496cca73e709c465d323889a4 *src/Makevars
94f8191496cca73e709c465d323889a4 *src/Makevars.win
f2e8c65df356f45fd2c5fb44c78d61bc *src/OACommonDefines.h
dd1ee7c05925ea002a3177907d472373 *src/RStandardUniform.h
72cfdd82c1f1edd241169ad8404557f9 *src/ak.h
364d8874ce60d9cc5f9522c56592aa3d *src/ak3.cpp
bd451bd4e8a6367a9e662af3178fd088 *src/akconst.cpp
5e4afc32f8f25cf205344f18f24e0db4 *src/akn.cpp
cd71d31647eff2e5a4031944d75689ea *src/bclibVersion.h
c5fe11e7e9d4c35a354f00fc5573b8ab *src/construct.cpp
e691f6ca99931970d3b5eea9afc416ce *src/construct.h
1692f74c2f90138327bb8904172cf751 *src/geneticLHS.cpp
e2914a2b526a8c80a31843df76188edb *src/improvedLHS.cpp
bc1767818bc7144993f2e0909087de44 *src/init.c
3e5fe0bbb790b6339dbfa15267eb22ae *src/lhs_r.cpp
24028cfc13b59c29abf5aeab955977b7 *src/lhs_r.h
112f5c99cef0996cfca96bdeea2fe88f *src/lhs_r_utilities.cpp
79e9b90afac77b5e3bdcf9c51c590c17 *src/lhs_r_utilities.h
98217866c6cb0fbc84feaeb59ae08413 *src/lhslibVersion.h
0d2df277fe0f51f1ce2980560d73c27e *src/matrix.h
72fe2357d4540e4d80aa662fd2f0981b *src/maximinLHS.cpp
c67aea4118cb38c26d0e511be2f4fff5 *src/oa.cpp
9186645f453a8590983abe4ce5212b51 *src/oa.h
e8093b9f81b7d27e75a57848e447d9f2 *src/oaLHS.cpp
45784fc7ab4837578bab9e1f79996fa1 *src/oaLHS.h
48c4f02615675fb58426db9187f9c968 *src/oaLHSUtility.h
1dcc1997f84c0f74b09d4b05f506877e *src/oa_r.cpp
d3159e8a39d74eb007a44b8033dd3e3b *src/oa_r.h
2bff89932d7176fecba89b681f3a12bd *src/oa_r_utils.cpp
5267128af97c78e8ceabe3fbffd8c5a2 *src/oa_r_utils.h
4d6b2c775d7fd2ee8b11e70ae4bc14a0 *src/oalhs_r.cpp
a6aac0d67ee324863cf8f33be0db0760 *src/oalhs_r.h
9454c11d37e62069bed7539ff7acdd69 *src/oalibVersion.h
edbce6a4953d22430c00f5c163af41b2 *src/optSeededLHS.cpp
71acbe84d04803e29e466eba3dbd976e *src/optimumLHS.cpp
0537b1d2d2a33374e68331c1d4580fe7 *src/order.h
87c5d2c644299dcf7797cbf412fb3421 *src/primes.cpp
3cbd87dbf024f0f2ba7bb9651ffbc424 *src/primes.h
2d00c5e4034621e2afffa47ccb044e08 *src/randomLHS.cpp
26a0b7412291e79921b093608db28de3 *src/runif.cpp
3f03595049a5e1bdc345b4e5fff505f4 *src/runif.h
2a58c2e95481fe9f2d175db2bfc37947 *src/rutils.cpp
bea92a5cc44b9602eedc5755adcb5e98 *src/rutils.h
afc1a761a5f5bb4b5db9ab34916ab507 *src/utilityLHS.cpp
4e63bafed4e840585c42c6d4bb49379e *src/utilityLHS.h
a9bf6acf833600dc23ea431f7cb8e97e *src/xtn.h
865a2e5b7e26745b142ca68350d7188f *tests/testthat.R
da6651eaf210fa98c427071829012e18 *tests/testthat/helper-lhs.R
2c7d56d1b00eda9f385a6ed433dcdc24 *tests/testthat/test-augmentlhs.R
b4fb47fe8ccfbc0beddaa2a7e0f5bdcd *tests/testthat/test-correlatedLHS.R
fe803b4ea05122c1f33700d69f4a4f42 *tests/testthat/test-create_oalhs.R
7cc11e06c2df3b7264e0fdbf260a0dd1 *tests/testthat/test-createoa.R
c287deed72e875c904ec4e99fbdcfcea *tests/testthat/test-galois_field.R
15105804ea6ef87a0d84eea60d8f94fd *tests/testthat/test-geneticlhs.R
e48c9878ba49cecbade584e08d315db1 *tests/testthat/test-get_library_versions.R
505ec17330211656bc054d0f057b2968 *tests/testthat/test-improvedlhs.r
abe052846c141cde46a81566e8ca5199 *tests/testthat/test-maximinlhs.R
11321200de36a73acabbee6cf2b1da41 *tests/testthat/test-oa_to_oalhs.R
5463fe0a9c89f0f762a89cbd461f57d0 *tests/testthat/test-optaugmentlhs.R
f810d8f8b902a3398eb7d300701a4296 *tests/testthat/test-optimumlhs.R
e0518a44b176a6478581f36e770c3435 *tests/testthat/test-optseededlhs.R
08cd8f9ff50ebba27865bd541b4476a4 *tests/testthat/test-quantile_transforms.R
96ab06789300bcfe9a875b3fda47b218 *tests/testthat/test-randomlhs.r
8f248fe4625ca971df2ab8b8f503fc6d *vignettes/VignetteCommonCode.R
2ea4584a4f8f5710e9e845e365d054ea *vignettes/augment_lhs.Rmd
c5932d5dece75c9a75a7af99bf83368f *vignettes/correlated_lhs.Rmd
c48d7934b6c01da2179998d83c78ac05 *vignettes/lhs_basics.Rmd
c08df90f722ffd24fa2d4df16d2dee50 *vignettes/lhs_faq.Rmd
lhs/R/createOA.R
# Copyright 2019 Robert Carnell
#' Create an orthogonal array using the Bose algorithm.
#'
#' The \code{bose} program
#' produces \code{OA( q^2, k, q, 2 )}, \code{k <= q+1} for prime powers \code{q}.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' R.C. Bose (1938) Sankhya Vol 3 pp 323-338
#' @examples
#' A <- createBose(3, 3, FALSE)
#' B <- createBose(5, 4, TRUE)
#' @seealso Other methods to create orthogonal arrays [createBush()],
#' [createBoseBush()], [createAddelKemp()], [createAddelKemp3()],
#' [createAddelKempN()], [createBusht()], [createBoseBushl()]
createBose <- function(q, ncol, bRandom=TRUE)
{
return(.Call("oa_type1", "bose", as.integer(q), as.integer(ncol),
as.logical(bRandom), PACKAGE = "lhs"))
}
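
# Illustrative sketch only (not run): the strength-2 property described above
# can be verified directly -- in an OA(q^2, k, q, 2) every ordered pair of
# symbols appears in any two columns exactly lambda = n / q^t = 1 time.
if (FALSE) {
  A <- createBose(3, 4, bRandom = FALSE)   # OA(9, 4, 3, 2)
  tab <- table(A[, 1], A[, 2])             # counts of symbol pairs in columns 1 and 2
  stopifnot(all(tab == 1))                 # index lambda = 1
}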
#' Create an orthogonal array using the Bose-Bush algorithm.
#'
#' The \code{bosebush} program
#' produces \code{OA( 2q^2, k, q, 2 )}, \code{k <= 2q+1}, for powers of 2, \code{q=2^r}.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' R.C. Bose and K.A. Bush (1952) Annals of Mathematical Statistics, Vol 23 pp 508-524.
#' @examples
#' A <- createBoseBush(4, 3, FALSE)
#' B <- createBoseBush(8, 3, TRUE)
#' @seealso Other methods to create orthogonal arrays [createBush()],
#' [createBose()], [createAddelKemp()], [createAddelKemp3()],
#' [createAddelKempN()], [createBusht()], [createBoseBushl()]
createBoseBush <- function(q, ncol, bRandom=TRUE)
{
return(.Call("oa_type1", "bosebush", as.integer(q), as.integer(ncol),
as.logical(bRandom), PACKAGE = "lhs"))
}
#' Create an orthogonal array using the Bush algorithm.
#'
#' The \code{bush} program
#' produces \code{OA( q^3, k, q, 3 )}, \code{k <= q+1} for prime powers \code{q}.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' K.A. Bush (1952) Annals of Mathematical Statistics, Vol 23 pp 426-434
#' @examples
#' A <- createBush(3, 3, FALSE)
#' B <- createBush(4, 5, TRUE)
#' @seealso Other methods to create orthogonal arrays [createBoseBush()],
#' [createBose()], [createAddelKemp()], [createAddelKemp3()],
#' [createAddelKempN()], [createBusht()], [createBoseBushl()]
createBush <- function(q, ncol, bRandom=TRUE)
{
return(.Call("oa_type1", "bush", as.integer(q), as.integer(ncol),
as.logical(bRandom), PACKAGE = "lhs"))
}
#' Create an orthogonal array using the Addelman-Kempthorne algorithm.
#'
#' The \code{addelkemp} program produces \code{OA( 2q^2, k, q, 2 )}, \code{k <= 2q+1},
#' for odd prime powers \code{q}.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' S. Addelman and O. Kempthorne (1961) Annals of Mathematical Statistics, Vol 32 pp 1167-1176.
#' @examples
#' A <- createAddelKemp(3, 3, TRUE)
#' B <- createAddelKemp(3, 5, FALSE)
#' @seealso Other methods to create orthogonal arrays [createBoseBush()],
#' [createBose()], [createAddelKemp3()],
#' [createAddelKempN()], [createBusht()], [createBoseBushl()]
createAddelKemp <- function(q, ncol, bRandom=TRUE)
{
return(.Call("oa_type1", "addelkemp", as.integer(q), as.integer(ncol),
as.logical(bRandom), PACKAGE = "lhs"))
}
#' Create an orthogonal array using the Addelman-Kempthorne algorithm
#' with \code{2q^3} rows.
#'
#' The \code{addelkemp3} program produces
#' \code{OA( 2*q^3, k, q, 2 )}, \code{k <= 2q^2+2q+1}, for prime powers \code{q}.
#' \code{q} may be an odd prime power, or \code{q} may be 2 or 4.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' S. Addelman and O. Kempthorne (1961) Annals of Mathematical Statistics, Vol 32 pp 1167-1176.
#' @examples
#' A <- createAddelKemp3(3, 3, TRUE)
#' B <- createAddelKemp3(3, 5, FALSE)
#' @seealso Other methods to create orthogonal arrays [createBoseBush()],
#' [createBose()], [createAddelKemp()],
#' [createAddelKempN()], [createBusht()], [createBoseBushl()]
createAddelKemp3 <- function(q, ncol, bRandom=TRUE)
{
return(.Call("oa_type1", "addelkemp3", as.integer(q), as.integer(ncol),
as.logical(bRandom), PACKAGE = "lhs"))
}
#' Create an orthogonal array using the Bush algorithm with alternate strength.
#'
#' The \code{busht} program produces \code{OA( q^t, k, q, t )}, \code{k <= q+1}, \code{t>=3},
#' for prime powers \code{q}.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param strength the strength of the array to be created
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' K.A. Bush (1952) Annals of Mathematical Statistics, Vol 23 pp 426-434
#' @examples
#' set.seed(1234)
#' A <- createBusht(3, 4, 2, TRUE)
#' B <- createBusht(3, 4, 3, FALSE)
#' G <- createBusht(3, 4, 3, TRUE)
#' @seealso Other methods to create orthogonal arrays [createBoseBush()],
#' [createBose()], [createAddelKemp()], [createAddelKemp3()],
#' [createAddelKempN()], [createBoseBushl()]
createBusht <- function(q, ncol, strength, bRandom=TRUE)
{
return(.Call("oa_type2", "busht", as.integer(strength), as.integer(q),
as.integer(ncol), as.logical(bRandom), PACKAGE = "lhs"))
}
#' Create an orthogonal array using the Bose-Bush algorithm with alternate strength >= 3.
#'
#' The \code{bosebushl} program produces \code{OA( lambda*q^2, k, q, 2 )},
#' \code{k <= lambda*q+1}, for prime powers \code{q} and \code{lambda > 1}. Both \code{q} and
#' \code{lambda} must be powers of the same prime.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param lambda the lambda of the BoseBush algorithm
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @references
#' Owen, Art. Orthogonal Arrays for: Computer Experiments, Visualizations, and
#' Integration in high dimensions. \url{https://lib.stat.cmu.edu/designs/oa.c}. 1994
#' R.C. Bose and K.A. Bush (1952) Annals of Mathematical Statistics, Vol 23 pp 508-524.
#' @examples
#' A <- createBoseBushl(3, 3, 3, TRUE)
#' B <- createBoseBushl(4, 4, 16, TRUE)
#' @seealso Other methods to create orthogonal arrays [createBoseBush()],
#' [createBose()], [createBush()], [createAddelKemp()], [createAddelKemp3()],
#' [createAddelKempN()], [createBusht()]
createBoseBushl <- function(q, ncol, lambda, bRandom=TRUE)
{
return(.Call("oa_type2", "bosebushl", as.integer(lambda), as.integer(q),
as.integer(ncol), as.logical(bRandom), PACKAGE = "lhs"))
}
#' Create an orthogonal array using the Addelman-Kempthorne algorithm with
#' alternate strength with \code{2q^n} rows.
#'
#' The \code{addelkempn} program produces
#' \code{OA( 2*q^n, k, q, 2 )}, \code{k <= 2(q^n - 1)/(q-1)-1}, for prime powers \code{q}.
#' \code{q} may be an odd prime power, or \code{q} may be 2 or 4.
#'
#' From Owen: An orthogonal array \code{A} is a matrix of \code{n} rows, \code{k}
#' columns with every element being one of \code{q} symbols
#' \code{0,...,q-1}. The array has strength \code{t} if, in every \code{n} by \code{t}
#' submatrix, the \code{q^t} possible distinct rows, all appear
#' the same number of times. This number is the index
#' of the array, commonly denoted \code{lambda}. Clearly,
#' \code{lambda*q^t=n}. The notation for such an array is \code{OA( n, k, q, t )}.
#'
#' @param q the number of symbols in the array
#' @param ncol number of parameters or columns
#' @param exponent the exponent on q
#' @param bRandom should the array be randomized
#' @return an orthogonal array
#' @export
#' @examples
#' A <- createAddelKempN(3, 4, 3, TRUE)
#' B <- createAddelKempN(3, 4, 4, TRUE)
#' @seealso Other methods to create orthogonal arrays [createBoseBush()],
#' [createBose()], [createBush()], [createAddelKemp()], [createAddelKemp3()],
#' [createBusht()], [createBoseBushl()]
createAddelKempN <- function(q, ncol, exponent, bRandom=TRUE)
{
return(.Call("oa_type2", "addelkempn", as.integer(exponent), as.integer(q),
as.integer(ncol), as.logical(bRandom), PACKAGE = "lhs"))
}
lhs/R/maximinLHS.R
# Copyright 2019 Robert Carnell
#' Maximin Latin Hypercube Sample
#'
#' Draws a Latin Hypercube Sample from a set of uniform distributions for use in
#' creating a Latin Hypercube Design. This function attempts to optimize the
#' sample by maximizing the minimum distance between design points (the maximin criterion).
#'
#' @details Latin hypercube sampling (LHS) was developed to generate a distribution
#' of collections of parameter values from a multidimensional distribution.
#' A square grid containing possible sample points is a Latin square iff there
#' is only one sample in each row and each column. A Latin hypercube is the
#' generalisation of this concept to an arbitrary number of dimensions. When
#' sampling a function of \code{k} variables, the range of each variable is divided
#' into \code{n} equally probable intervals. \code{n} sample points are then drawn such that a
#' Latin Hypercube is created. Latin Hypercube sampling generates more efficient
#' estimates of desired parameters than simple Monte Carlo sampling.
#'
#' This program generates a Latin Hypercube Sample by creating random permutations
#' of the first \code{n} integers in each of \code{k} columns and then transforming those
#' integers into n sections of a standard uniform distribution. Random values are
#' then sampled from within each of the n sections. Once the sample is generated,
#' the uniform sample from a column can be transformed to any distribution by
#' using the quantile functions, e.g. qnorm(). Different columns can have
#' different distributions.
#'
#' Here, values are added to the design one by one such that the maximin criterion is
#' satisfied.
#'
#' @param n The number of partitions (simulations or design points or rows)
#' @param k The number of replications (variables or columns)
#' @param method \code{build} or \code{iterative} is the method of LHS creation.
#' \code{build} finds the next best point while constructing the LHS.
#' \code{iterative} optimizes the resulting sample on [0,1] or sample grid on [1,N]
#' @param dup A factor that determines the number of candidate points used in the
#' search. A multiple of the number of remaining points that can be
#' added. This is used when \code{method="build"}
#' @param eps The minimum percent change in the minimum distance used in the
#' \code{iterative} method
#' @param maxIter The maximum number of iterations to use in the \code{iterative} method
#' @param optimize.on \code{grid} or \code{result} gives the basis of the optimization.
#' \code{grid} optimizes the LHS on the underlying integer grid.
#' \code{result} optimizes the resulting sample on [0,1]
#' @param debug prints additional information about the process of the optimization
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values uniformly distributed on [0,1]
#' @export
#' @keywords design
#' @importFrom stats dist
#'
#' @references
#' Stein, M. (1987) Large Sample Properties of Simulations Using Latin Hypercube Sampling.
#' \emph{Technometrics}. \bold{29}, 143--151.
#'
#' This function is motivated by the MATLAB program written by John Burkardt and modified 16 Feb 2005
#' \url{https://people.math.sc.edu/Burkardt/m_src/ihs/ihs.html}
#'
#' @seealso [randomLHS()], [geneticLHS()], [improvedLHS()] and [optimumLHS()]
#' to generate Latin Hypercube Samples. [optAugmentLHS()], [optSeededLHS()], and
#' [augmentLHS()] to modify and augment existing designs.
#'
#' @examples
#' set.seed(1234)
#' A1 <- maximinLHS(4, 3, dup=2)
#' A2 <- maximinLHS(4, 3, method="build", dup=2)
#' A3 <- maximinLHS(4, 3, method="iterative", eps=0.05, maxIter=100, optimize.on="grid")
#' A4 <- maximinLHS(4, 3, method="iterative", eps=0.05, maxIter=100, optimize.on="result")
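#'
#' # a minimal sketch of the margin transform described in the Details: the
#' # uniform columns can be pushed through quantile functions after the sample
#' # is drawn (the distribution choices here are illustrative only)
#' B <- maximinLHS(4, 3, dup=2)
#' B[,1] <- qnorm(B[,1], mean = 0, sd = 1)
#' B[,2] <- qunif(B[,2], min = 5, max = 10)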
maximinLHS <- function(n, k, method="build", dup=1, eps=0.05, maxIter=100,
optimize.on="grid", debug=FALSE)
{
if (!(method %in% c("build","iterative")))
stop("Method not recognized. Please use 'build' or 'iterative'")
if (!(optimize.on %in% c("grid","result")))
stop("optimize.on parameter must be 'result' or 'grid'")
if (debug)
cat("Debug info:\n")
if (method == "build")
{
if (optimize.on == "result")
{
if (debug) cat(paste0("\toptimize.on=", optimize.on, " method=", method, "\n"))
warning("when method='build' then optimize.on is set to 'grid'")
}
result <- .Call("maximinLHS_cpp", as.integer(n), as.integer(k), as.integer(dup),
PACKAGE = "lhs")
return(result)
} else if (method == "iterative")
{
if (length(n) != 1 | length(k) != 1 | length(dup) != 1)
stop("n, k, and dup may not be vectors")
if (any(is.na(c(n,k,dup)))) stop("n, k, and dup may not be NA or NaN")
if (any(is.infinite(c(n,k,dup)))) stop("n, k, and dup may not be infinite")
if (n != floor(n) | n < 1) stop("n must be a positive integer")
if (k != floor(k) | k < 1) stop("k must be a positive integer")
if (dup != floor(dup) | dup < 1)
stop("The dup factor must be a positive integer")
adjust <- runif(n*k)
if (optimize.on == "result")
{
X <- randomLHS(n, k, preserveDraw = FALSE)
} else if (optimize.on == "grid")
{
X <- sapply(1:k, function(x) order(runif(n)))
}
Y <- X
minDist <- min(dist(X))
# the columns of res are the ith row, mth row, jth column, and the min distance when those are exchanged
res <- matrix(0, nrow = k*choose(n, 2), ncol = 4)
iter <- 1
while (iter < maxIter)
{
# counter is the counter down the rows of res
counter <- 1
# try all pairwise row swaps within each column in the LHS
for (j in 1:k)
{
for (i in 1:(n - 1))
{
for (m in (i + 1):n)
{
# swap
Y[i,j] <- X[m,j]
Y[m,j] <- X[i,j]
# record the result
res[counter,1] <- i
res[counter,2] <- m
res[counter,3] <- j
res[counter,4] <- min(dist(Y))
# swap back
Y[i,j] <- X[i,j]
Y[m,j] <- X[m,j]
counter <- counter + 1
}
}
}
# find the best swap
ind <- which.max(res[,4])
# make the swap
Y[res[ind,1],res[ind,3]] <- X[res[ind,2],res[ind,3]]
Y[res[ind,2],res[ind,3]] <- X[res[ind,1],res[ind,3]]
temp <- min(dist(Y))
# test the new minimum distance between points
if (temp < minDist)
{
if (debug)
cat("\tstopped because no changes improved minimum distance\n")
if (optimize.on == "result")
return(X)
else
return((X - 1 + matrix(adjust, nrow = n, ncol = k))/n)
}
if (res[ind,4] < (1 + eps)*minDist)
{
if (debug)
cat("\tstopped because the minimum improvement was not reached\n")
if (optimize.on == "result")
return(Y)
else
return((Y - 1 + matrix(adjust, nrow = n, ncol = k))/n)
} else
{
minDist <- temp
X <- Y
}
iter <- iter + 1
}
if (debug)
cat("\tstoped on iterations\n")
if (optimize.on == "result")
return(Y)
else
return((Y - 1 + matrix(adjust, nrow = n, ncol = k))/n)
}
}
lhs/R/galois_field.R 0000644 0001762 0000144 00000010743 14115430532 014010 0 ustar ligges users # Copyright 2021 Robert Carnell
#' Create a Galois field
#'
#' @param q The order of the Galois Field q = p^n
#'
#' @return a GaloisField object containing
#' \describe{
#' \item{n}{q = p^n}
#' \item{p}{The prime modulus of the field q=p^n}
#' \item{q}{The order of the Galois Field q = p^n. \code{q} must be a prime power.}
#' \item{xton}{coefficients of the characteristic polynomial where the first coefficient is on \eqn{x^0}, the second is on \eqn{x^1} and so on}
#' \item{inv}{An index for which row of \code{poly} (zero based) is the multiplicative inverse of this row. An \code{NA} indicates that this row of \code{poly} has no inverse. e.g. c(3, 4) means that row 4=3+1 is the inverse of row 1 and row 5=4+1 is the inverse of row 2}
#' \item{neg}{An index for which row of \code{poly} (zero based) is the negative or additive inverse of this row. An \code{NA} indicates that this row of \code{poly} has no negative. e.g. c(3, 4) means that row 4=3+1 is the negative of row 1 and row 5=4+1 is the negative of row 2}
#' \item{root}{An index for which row of \code{poly} (zero based) is the square root of this row. An \code{NA} indicates that this row of \code{poly} has no square root. e.g. c(3, 4) means that row 4=3+1 is the square root of row 1 and row 5=4+1 is the square root of row 2}
#' \item{plus}{sum table of the Galois Field}
#' \item{times}{multiplication table of the Galois Field}
#' \item{poly}{rows are polynomials of the Galois Field where the entries are the coefficients of the polynomial where the first coefficient is on \eqn{x^0}, the second is on \eqn{x^1} and so on}
#' }
#' @export
#'
#' @examples
#' gf <- create_galois_field(4);
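#' # since 4 = 2^2, the prime and exponent stored in the object are fixed
#' stopifnot(gf$p == 2, gf$n == 2)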
create_galois_field <- function(q)
{
if (q <= 1) stop("q must be 2 or greater")
gf <- .Call("create_galois_field", as.integer(q), PACKAGE = "lhs")
class(gf) <- "GaloisField"
names(gf) <- c("n", "p", "q", "xton", "inv", "neg", "root", "plus", "times", "poly")
if (any(gf$inv == -1))
gf$inv[which(gf$inv == -1)] <- NA
if (any(gf$neg == -1))
gf$neg[which(gf$neg == -1)] <- NA
if (any(gf$root == -1))
gf$root[which(gf$root == -1)] <- NA
return(gf)
}
#' Multiplication in polynomial representation
#'
#' @param p modulus
#' @param n length of polynomials
#' @param xton characteristic polynomial vector for the field (x to the n power)
#' @param p1 polynomial vector 1
#' @param p2 polynomial vector 2
#'
#' @return the product of p1 and p2
#' @export
#'
#' @examples
#' gf <- create_galois_field(4)
#' a <- poly_prod(gf$p, gf$n, gf$xton, c(1, 0), c(0, 1))
#' stopifnot(all(a == c(0, 1)))
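#'
#' # x * x in GF(4): the only monic irreducible quadratic over GF(2) is
#' # x^2 + x + 1, so the product is expected to reduce to 1 + x, i.e. c(1, 1)
#' poly_prod(gf$p, gf$n, gf$xton, c(0, 1), c(0, 1))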
poly_prod <- function(p, n, xton, p1, p2)
{
if (n != length(xton)) stop("the length of xton must be n")
if (n != length(p1)) stop("the length of p1 must be n")
if (n != length(p2)) stop("the length of p2 must be n")
if (any(c(xton, p1, p2) >= p)) stop("The entries of the polynomial vectors must be less than p")
if (any(c(xton, p1, p2) < 0)) stop("The entries of the polynomial vectors must be non-negative")
.Call("poly_prod", as.integer(p), as.integer(n), as.integer(xton),
as.integer(p1), as.integer(p2), PACKAGE = "lhs")
}
#' Addition in polynomial representation
#'
#' @param p modulus
#' @param n length of polynomial 1 and 2
#' @param p1 polynomial vector 1
#' @param p2 polynomial vector 2
#'
#' @return the sum of p1 and p2
#' @export
#'
#' @examples
#' gf <- create_galois_field(4)
#' a <- poly_sum(gf$p, gf$n, c(1, 0), c(0, 1))
#' stopifnot(all(a == c(1, 1)))
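#'
#' # addition is coefficient-wise modulo p, so in GF(4) x + x = 0
#' poly_sum(gf$p, gf$n, c(0, 1), c(0, 1))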
poly_sum <- function(p, n, p1, p2)
{
if (n != length(p1)) stop("the length of p1 must be n")
if (n != length(p2)) stop("the length of p2 must be n")
if (any(c(p1, p2) >= p)) stop("The entries of the polynomial vectors must be less than p")
if (any(c(p1, p2) < 0)) stop("The entries of the polynomial vectors must be non-negative")
.Call("poly_sum", as.integer(p), as.integer(n), as.integer(p1), as.integer(p2),
PACKAGE = "lhs")
}
#' Convert polynomial to integer in 0..q-1
#'
#' @param p modulus
#' @param n the length of poly
#' @param poly the polynomial vector
#'
#' @return an integer
#' @export
#'
#' @examples
#' gf <- create_galois_field(4)
#' stopifnot(poly2int(gf$p, gf$n, c(0, 0)) == 0)
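#' # assuming the base-p positional encoding implied above, the polynomial
#' # 1 + x in GF(4) should map to 1*1 + 1*2 = 3
#' poly2int(gf$p, gf$n, c(1, 1))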
poly2int <- function(p, n, poly)
{
if (n != length(poly)) stop("the length of poly must be n")
if (any(poly >= p)) stop("The entries of the polynomial vectors must be less than p")
if (any(poly < 0)) stop("The entries of the polynomial vector must be non-negative")
.Call("poly2int", as.integer(p), as.integer(n), as.integer(poly), PACKAGE = "lhs")
}
lhs/R/randomLHS.r 0000644 0001762 0000144 00000001717 13416442547 013273 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Construct a random Latin hypercube design
#'
#' \code{randomLHS(4,3)} returns a 4x3 matrix with each column
#' constructed as follows: A random permutation of (1,2,3,4) is
#' generated, say (3,1,2,4) for each of K columns. Then a uniform
#' random number is picked from each indicated quartile. In this
#' example a random number between .5 and .75 is chosen, then one between
#' 0 and .25, then one between .25 and .5, finally one between
#' .75 and 1.
#'
#' @param n the number of rows or samples
#' @param k the number of columns or parameters/variables
#' @param preserveDraw should the draw be constructed so that it is the same for variable numbers of columns?
#' @return a Latin hypercube sample
#' @export
#'
#' @examples
#' a <- randomLHS(5, 3)
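#'
#' # each column should contain exactly one point in each of the 5
#' # equal-probability intervals (the construction described above)
#' apply(a, 2, function(x) table(cut(x, breaks = seq(0, 1, by = 0.2))))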
randomLHS <- function(n, k, preserveDraw=FALSE)
{
.Call("randomLHS_cpp", as.integer(n), as.integer(k), as.logical(preserveDraw),
PACKAGE = "lhs")
}
lhs/R/correlatedLHS.R 0000644 0001762 0000144 00000011016 14616012777 014071 0 ustar ligges users #' Transformed Latin hypercube with a multivariate distribution
#'
#' @description
#' A method to create a transformed Latin Hypercube sample where the marginal
#' distributions can be correlated according to an arbitrary set of criteria
#' contained in a minimized cost function
#'
#' @param lhs a Latin hypercube sample that is uniformly distributed on the
#' margins
#' @param marginal_transform_function a function that takes Latin hypercube sample
#' as the first argument and other passed-through variables as desired. \code{...} must
#' be passed as a argument. For example, \code{f <- function(W, second_argument, ...)}.
#' Must return a \code{matrix} or \code{data.frame}
#' @param cost_function a function that takes a transformed Latin hypercube sample
#' as the first argument and other passed-through variables as desired. \code{...} must
#' be passed as an argument. For example, \code{f <- function(W, second_argument, ...)}
#' @param debug Should debug messages be printed. Causes cost function output
#' and iterations to be printed to aid in setting the maximum number of iterations
#' @param maxiter the maximum number of iterations. The algorithm proceeds by
#' swapping one variable of two points at a time. Each swap is an iteration.
#' @param ... Additional arguments to be passed through to the \code{marginal_transform_function}
#' and \code{cost_function}
#'
#' @return a list of the Latin hypercube with uniform margins, the transformed
#' Latin hypercube, and the final cost
#' @export
#'
#' @examples
#' correlatedLHS(lhs::randomLHS(30, 2),
#' marginal_transform_function = function(W, ...) {
#' W[,1] <- qnorm(W[,1], 1, 3)
#' W[,2] <- qexp(W[,2], 2)
#' return(W)
#' },
#' cost_function = function(W, ...) {
#' (cor(W[,1], W[,2]) - 0.5)^2
#' },
#' debug = FALSE,
#' maxiter = 1000)
correlatedLHS <- function(lhs, marginal_transform_function,
cost_function, debug=FALSE, maxiter=10000,
...) {
Nlhs <- nrow(lhs)
k <- ncol(lhs)
## Initial transform and cost
lhs_t <- marginal_transform_function(lhs, ...)
if (!is.matrix(lhs_t) & !is.data.frame(lhs_t)) {
stop("The marginal_transform_function should return a matrix or data.frame")
}
if (!all(dim(lhs) == dim(lhs_t))) {
stop("resulting design after transformation with marginal_transform_function must be the same dimension as the original design")
}
initial_cost <- cost_function(lhs_t, ...)
if (!is.numeric(initial_cost) | length(initial_cost) != 1) {
stop("cost function must return a single cost as a numeric")
}
if (debug) {
cat(paste0("\nInitial cost: ", initial_cost, "\n"))
}
best_cost <- initial_cost
swap <- function(i1, i2, j1, j2, dat) {
temp <- dat[i1, j1]
dat[i1, j1] <- dat[i2, j2]
dat[i2, j2] <- temp
return(dat)
}
iter <- 1
recent_improvement <- TRUE
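# greedy columnwise search: within each column, swap each pair of rows in turn
# and keep a swap only when it strictly reduces the cost; stop when the cost
# reaches zero, no swap improves it, or maxiter is reached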
while (best_cost > 0 & iter < maxiter & recent_improvement) {
recent_improvement <- FALSE
if (debug && iter > 1) {
cat(paste0("Iteration ", iter, " with cost: ", best_cost, "\n"))
}
for (j in 1:k) {
if (debug) {
cat(paste0("\tSwapping on Variable ", j, " of ", k, " on Iteration ", iter, "\n"))
}
for (i1 in 1:(Nlhs-1)) {
for (i2 in i1:Nlhs) {
if (best_cost > 0) {
lhs <- swap(i1, i2, j, j, lhs)
lhs_t <- marginal_transform_function(lhs, ...)
cost <- cost_function(lhs_t, ...)
if (cost >= best_cost) {
# swap back
lhs <- swap(i2, i1, j, j, lhs)
} else {
# stay here and update best
best_cost <- cost
recent_improvement <- TRUE
break
}
iter <- iter + 1
if (best_cost == 0 | iter > maxiter) break
}
}
if (best_cost == 0 | iter > maxiter) break
}
if (best_cost == 0 | iter > maxiter) break
}
}
if (debug) {
cat(paste0("Ending at cost: ", best_cost, "\n"))
if (!all(apply(lhs, 2, function(x) sum(floor(x*Nlhs)+1)) == Nlhs*(Nlhs+1)/2)) {
stop("Latin hypercube not created in swapping process")
}
if (!all(lhs > 0 & lhs < 1)) {
stop("Latin hypercube sample outside bounds (0,1)")
}
}
return(list(lhs = lhs,
transformed_lhs = marginal_transform_function(lhs, ...), # re-transform: the last lhs_t may correspond to a swap that was reverted
cost = best_cost))
}
lhs/R/oa_to_oalhs.R 0000644 0001762 0000144 00000002012 13425355365 013661 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Create a Latin hypercube from an orthogonal array
#'
#' @param n the number of samples or rows in the LHS (integer)
#' @param k the number of parameters or columns in the LHS (integer)
#' @param oa the orthogonal array to be used as the basis for the LHS (matrix of integers) or data.frame of factors
#'
#' @return a numeric matrix which is a Latin hypercube sample
#' @export
#'
#' @examples
#' oa <- createBose(3, 4, TRUE)
#' B <- oa_to_oalhs(9, 4, oa)
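#'
#' # B should be a 9 x 4 numeric matrix with values on (0,1)
#' dim(B)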
oa_to_oalhs <- function(n, k, oa)
{
if (is.integer(oa) && is.matrix(oa))
{
return(.Call("oa_to_lhs", as.integer(n), as.integer(k), oa,
FALSE, PACKAGE = "lhs"))
} else if (is.data.frame(oa))
{
Y <- as.matrix(oa)
Z <- matrix(as.integer(Y), nrow = nrow(oa), ncol = ncol(oa))
return(.Call("oa_to_lhs", as.integer(n), as.integer(k), Z,
FALSE, PACKAGE = "lhs"))
} else
{
stop("oa must be an integer matrix or it must be a data.frame of factors")
}
}
lhs/R/lhs.R 0000644 0001762 0000144 00000000152 13416455041 012153 0 ustar ligges users # Copyright 2019 Robert Carnell
#' @useDynLib lhs
#' @keywords internal
#' @import Rcpp
"_PACKAGE"
lhs/R/optimumLHS.R 0000644 0001762 0000144 00000006263 13425060436 013437 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Optimum Latin Hypercube Sample
#'
#' Draws a Latin Hypercube Sample from a set of uniform distributions for use in
#' creating a Latin Hypercube Design. This function uses the Columnwise
#' Pairwise (\acronym{CP}) algorithm to generate an optimal design with respect to the S
#' optimality criterion.
#'
#' @details Latin hypercube sampling (LHS) was developed to generate a distribution
#' of collections of parameter values from a multidimensional distribution.
#' A square grid containing possible sample points is a Latin square iff there
#' is only one sample in each row and each column. A Latin hypercube is the
#' generalisation of this concept to an arbitrary number of dimensions. When
#' sampling a function of \code{k} variables, the range of each variable is divided
#' into \code{n} equally probable intervals. \code{n} sample points are then drawn such that a
#' Latin Hypercube is created. Latin Hypercube sampling generates more efficient
#' estimates of desired parameters than simple Monte Carlo sampling.
#'
#' This program generates a Latin Hypercube Sample by creating random permutations
#' of the first \code{n} integers in each of \code{k} columns and then transforming those
#' integers into n sections of a standard uniform distribution. Random values are
#' then sampled from within each of the n sections. Once the sample is generated,
#' the uniform sample from a column can be transformed to any distribution by
#' using the quantile functions, e.g. qnorm(). Different columns can have
#' different distributions.
#'
#' S-optimality seeks to maximize the mean distance from each design point to all
#' the other points in the design, so the points are as spread out as possible.
#'
#' This function uses the \acronym{CP} algorithm to generate an optimal
#' design with respect to the S optimality criterion.
#'
#' @param n The number of partitions (simulations or design points or rows)
#' @param k The number of replications (variables or columns)
#' @param maxSweeps The maximum number of times the CP algorithm is applied to all the columns.
#' @param eps The optimal stopping criterion. Algorithm stops when the change in
#' optimality measure is less than eps*100\% of the previous value.
#' @param verbose Print informational messages
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values uniformly distributed on [0,1]
#' @export
#' @seealso [randomLHS()], [geneticLHS()], [improvedLHS()] and [maximinLHS()]
#' to generate Latin Hypercube Samples. [optAugmentLHS()], [optSeededLHS()], and
#' [augmentLHS()] to modify and augment existing designs.
#' @keywords design
#'
#' @references
#' Stocki, R. (2005) A method to improve design reliability using optimal Latin hypercube sampling
#' \emph{Computer Assisted Mechanics and Engineering Sciences} \bold{12}, 87--105.
#'
#' @examples
#' A <- optimumLHS(4, 3, 5, .05)
optimumLHS <- function(n=10, k=2, maxSweeps=2, eps=.1, verbose=FALSE)
{
result <- .Call("optimumLHS_cpp", as.integer(n), as.integer(k),
as.integer(maxSweeps), eps, as.logical(verbose),
PACKAGE = "lhs")
return(result)
}
lhs/R/optAugmentLHS.R 0000644 0001762 0000144 00000007223 13425061474 014070 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Optimal Augmented Latin Hypercube Sample
#'
#' Augments an existing Latin Hypercube Sample, adding points to the design, while
#' maintaining the \emph{latin} properties of the design. This function attempts to
#' add the points to the design in an optimal way.
#'
#' Augments an existing Latin Hypercube Sample, adding points to the design, while
#' maintaining the \emph{latin} properties of the design. This function attempts to
#' add the points to the design in a way that maximizes S optimality.
#'
#' S-optimality seeks to maximize the mean distance from each design point to all
#' the other points in the design, so the points are as spread out as possible.
#'
#' @param lhs The Latin Hypercube Design to which points are to be added
#' @param m The number of additional points to add to matrix \code{lhs}
#' @param mult \code{m*mult} random candidate points will be created.
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values uniformly distributed on [0,1]
#' @export
#' @keywords design
#' @seealso
#' [randomLHS()], [geneticLHS()], [improvedLHS()], [maximinLHS()], and
#' [optimumLHS()] to generate Latin Hypercube Samples. [optSeededLHS()] and
#' [augmentLHS()] to modify and augment existing designs.
#' @importFrom stats runif na.exclude na.omit
#'
#' @references
#' Stein, M. (1987) Large Sample Properties of Simulations Using Latin Hypercube Sampling.
#' \emph{Technometrics}. \bold{29}, 143--151.
#'
#' @examples
#' set.seed(1234)
#' a <- randomLHS(4,3)
#' b <- optAugmentLHS(a, 2, 3)
optAugmentLHS <- function(lhs, m=1, mult=2)
{
if (is.matrix(lhs) == FALSE)
stop("Input Design must be in the Matrix class\n")
if (length(m) != 1 | length(mult) != 1)
stop("m and mult may not be vectors")
if (is.na(m) | is.infinite(m))
stop("m may not be infinite, NA, or NaN")
if (is.na(mult) | is.infinite(mult))
stop("mult may not be infinite, NA, or NaN")
if (m != floor(m) | m < 1)
stop("m must be a positive integer\n")
if (any(is.na(lhs) == TRUE))
stop("Input Design cannot contain any NA entries\n")
if (any(lhs < 0 | lhs > 1))
stop("Input Design must have entries on the interval [0,1]\n")
K <- ncol(lhs)
N <- nrow(lhs)
colvec <- order(runif(K))
rowvec <- order(runif(N + m))
B <- matrix(nrow = (N + m), ncol = K)
for (j in colvec) {
newrow <- 0
for (i in rowvec) {
if ((any((i - 1)/(N + m) <= lhs[ ,j] & lhs[ ,j] <= i/(N + m))) == FALSE) {
newrow <- newrow + 1
B[newrow, j] <- runif(1, (i - 1)/(N + m), i/(N + m))
}
}
}
lhs <- rbind(lhs, matrix(nrow = m, ncol = K))
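# for each of the m new points: draw m*mult candidate points from the remaining
# values in B, then keep the candidate whose summed squared distance to the
# points already in the design is largest (S optimality)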
for (k in 1:m) {
P <- matrix(nrow = m*mult, ncol = K)
for (i in 1:K) {
P[,i] <- runifint(m*mult, 1, length(na.exclude(B[,i])))
}
for (i in 1:K) {
for (j in 1:(m*mult)) {
P[j, i] <- B[P[j, i], i]
}
}
vec <- numeric(K)
dist1 <- 0
maxdist <- .Machine$double.xmin
for (i in 1:(m*mult - k + 1)) {
dist1 <- numeric(N + k - 1)
for (j in 1:(N + k - 1)) {
vec <- P[i,] - lhs[j,]
dist1[j] <- vec %*% vec
}
if (sum(dist1) > maxdist) {
maxdist <- sum(dist1)
maxrow <- i
}
}
lhs[N + k,] <- P[maxrow,]
for (i in 1:K) {
for (j in 1:length(na.omit(B[,i]))) {
if (P[maxrow,i] == B[j,i]) B[j,i] <- NA
}
}
for (i in 1:K) {
if (length(na.omit(B[,i])) == 0)
next
u <- length(na.omit(B[,i]))
B[1:u,i] <- na.omit(B[,i])
B[(u + 1):m,i] <- NA
}
}
return(lhs)
}
lhs/R/runifint.r 0000644 0001762 0000144 00000001007 13636661136 013273 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Create a Random Sample of Uniform Integers
#'
#' @param n The number of samples
#' @param min_int the minimum integer \code{x >= min_int}
#' @param max_int the maximum integer \code{x <= max_int}
#'
#' @return a random sample of size \code{n}
#'
#' @export
#' @importFrom stats runif
runifint <- function(n=1, min_int=0, max_int=1)
{
r <- runif(n, min = 0, max = 1)
int <- min_int + floor(r * (max_int + 1 - min_int))
int <- pmin(int, max_int)
return(int)
}
lhs/R/improvedLHS.r 0000644 0001762 0000144 00000006010 14115501127 013612 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Improved Latin Hypercube Sample
#'
#' Draws a Latin Hypercube Sample from a set of uniform distributions for use in
#' creating a Latin Hypercube Design. This function attempts to optimize the
#' sample with respect to an optimum euclidean distance between design points.
#'
#' @details Latin hypercube sampling (LHS) was developed to generate a distribution
#' of collections of parameter values from a multidimensional distribution.
#' A square grid containing possible sample points is a Latin square iff there
#' is only one sample in each row and each column. A Latin hypercube is the
#' generalisation of this concept to an arbitrary number of dimensions. When
#' sampling a function of \code{k} variables, the range of each variable is divided
#' into \code{n} equally probable intervals. \code{n} sample points are then drawn such that a
#' Latin Hypercube is created. Latin Hypercube sampling generates more efficient
#' estimates of desired parameters than simple Monte Carlo sampling.
#'
#' This program generates a Latin Hypercube Sample by creating random permutations
#' of the first \code{n} integers in each of \code{k} columns and then transforming those
#' integers into n sections of a standard uniform distribution. Random values are
#' then sampled from within each of the n sections. Once the sample is generated,
#' the uniform sample from a column can be transformed to any distribution by
#' using the quantile functions, e.g. qnorm(). Different columns can have
#' different distributions.
#'
#' This function attempts to optimize the sample with respect to an optimum
#' euclidean distance between design points.
#' \deqn{Optimum distance = \frac{n}{n^{\frac{1.0}{k}}}}{Optimum distance = n/n^(1.0/k)}
#'
#' @param n The number of partitions (simulations or design points or rows)
#' @param k The number of replications (variables or columns)
#' @param dup A factor that determines the number of candidate points used in the
#' search. A multiple of the number of remaining points than can be added.
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values uniformly distributed on [0,1]
#' @export
#' @keywords design
#'
#' @references
#' Beachkofski, B., Grandhi, R. (2002) Improved Distributed Hypercube Sampling
#' \emph{American Institute of Aeronautics and Astronautics Paper} \bold{1274}.
#'
#' This function is based on the MATLAB program written by John Burkardt and modified 16 Feb 2005
#' \url{https://people.math.sc.edu/Burkardt/m_src/ihs/ihs.html}
#'
#' @seealso [randomLHS()], [geneticLHS()], [maximinLHS()], and [optimumLHS()]
#' to generate Latin Hypercube Samples. [optAugmentLHS()], [optSeededLHS()], and
#' [augmentLHS()] to modify and augment existing designs.
#'
#' @examples
#' set.seed(1234)
#' A <- improvedLHS(4, 3, 2)
improvedLHS <- function(n, k, dup=1)
{
result <- .Call("improvedLHS_cpp", as.integer(n), as.integer(k), as.integer(dup),
PACKAGE = "lhs")
return(result)
}
lhs/R/geneticLHS.R 0000644 0001762 0000144 00000010037 13425055402 013352 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Latin Hypercube Sampling with a Genetic Algorithm
#'
#' Draws a Latin Hypercube Sample from a set of uniform distributions for use in
#' creating a Latin Hypercube Design. This function attempts to optimize the
#' sample with respect to the S optimality criterion through a genetic type
#' algorithm.
#'
#' @details Latin hypercube sampling (LHS) was developed to generate a distribution
#' of collections of parameter values from a multidimensional distribution.
#' A square grid containing possible sample points is a Latin square iff there
#' is only one sample in each row and each column. A Latin hypercube is the
#' generalisation of this concept to an arbitrary number of dimensions. When
#' sampling a function of \code{k} variables, the range of each variable is divided
#' into \code{n} equally probable intervals. \code{n} sample points are then drawn such that a
#' Latin Hypercube is created. Latin Hypercube sampling generates more efficient
#' estimates of desired parameters than simple Monte Carlo sampling.
#'
#' This program generates a Latin Hypercube Sample by creating random permutations
#' of the first \code{n} integers in each of \code{k} columns and then transforming those
#' integers into n sections of a standard uniform distribution. Random values are
#' then sampled from within each of the n sections. Once the sample is generated,
#' the uniform sample from a column can be transformed to any distribution by
#' using the quantile functions, e.g. qnorm(). Different columns can have
#' different distributions.
#'
#' S-optimality seeks to maximize the mean distance from each design point to all
#' the other points in the design, so the points are as spread out as possible.
#'
#' Genetic Algorithm:
#' \enumerate{
#' \item Generate \code{pop} random latin hypercube designs of size \code{n} by \code{k}
#' \item Calculate the S optimality measure of each design
#' \item Keep the best design in the first position and throw away half of the rest of the population
#' \item Take a random column out of the best matrix and place it in a random column of each of the other matrices, and take a random column out of each of the other matrices and put it in copies of the best matrix, thereby creating the progeny
#' \item For each of the progeny, cause a genetic mutation \code{pMut} percent of the time. The mutation is accomplished by switching two elements in a column
#' }
#'
#' @param n The number of partitions (simulations or design points or rows)
#' @param k The number of replications (variables or columns)
#' @param pop The number of designs in the initial population
#' @param gen The number of generations over which the algorithm is applied
#' @param pMut The probability with which a mutation occurs in a column of the progeny
#' @param criterium The optimality criterium of the algorithm. Default is \code{S}. \code{Maximin} is also supported
#' @param verbose Print informational messages. Default is \code{FALSE}
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values uniformly distributed on [0,1]
#' @export
#' @references
#' Stocki, R. (2005) A method to improve design reliability using optimal Latin hypercube sampling
#' \emph{Computer Assisted Mechanics and Engineering Sciences} \bold{12}, 87--105.
#'
#' Stein, M. (1987) Large Sample Properties of Simulations Using Latin Hypercube Sampling.
#' \emph{Technometrics}. \bold{29}, 143--151.
#'
#' @seealso [randomLHS()], [improvedLHS()], [maximinLHS()],
#' and [optimumLHS()] to generate Latin Hypercube Samples. [optAugmentLHS()],
#' [optSeededLHS()], and [augmentLHS()] to modify and augment existing designs.
#'
#' @keywords design
#' @author Rob Carnell
#'
#' @examples
#' set.seed(1234)
#' A <- geneticLHS(4, 3, 50, 5, .25)
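#'
#' # the Maximin criterium mentioned above (small sizes keep the example fast)
#' B <- geneticLHS(4, 3, pop = 20, gen = 3, pMut = 0.25, criterium = "Maximin")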
geneticLHS <- function(n=10, k=2, pop=100, gen=4, pMut=.1, criterium="S", verbose=FALSE)
{
.Call("geneticLHS_cpp", as.integer(n), as.integer(k), as.integer(pop), as.integer(gen),
pMut, criterium, as.logical(verbose), PACKAGE = "lhs")
}
lhs/R/quantile_transforms.R 0000644 0001762 0000144 00000006204 14635432227 015476 0 ustar ligges users # Copyright 2024 Robert Carnell
#' Quantile Transformations
#'
#' A collection of functions that transform the margins of a Latin hypercube
#' sample in multiple ways
#'
#' \code{qdirichlet} is not an exact quantile function since the quantile of a
#' multivariate distribution is not unique. \code{qdirichlet} is also not the
#' independent quantiles of the marginal distributions since
#' those quantiles do not sum to one. \code{qdirichlet} is the quantile of the
#' underlying gamma functions, normalized. This is the same procedure that
#' is used to generate random deviates from the Dirichlet distribution; therefore,
#' it will produce transformed Latin hypercube samples with the intended distribution.
#'
#' \code{qfactor} divides the [0,1] interval into \code{nlevels(fact)} equal sections
#' and assigns values in those sections to the factor level.
#'
#' @rdname quanttrans
#'
#' @param p a vector of LHS samples on (0,1)
#' @param fact a factor or categorical variable. Ordered and un-ordered variables are allowed.
#' @param a a minimum integer
#' @param b a maximum integer
#' @param X multiple columns of an LHS sample on (0,1)
#' @param alpha Dirichlet distribution parameters. All \code{alpha >= 1} The marginal
#' mean probability of the Dirichlet distribution is given by \code{alpha[i] / sum(alpha)}
#'
#' @return the transformed column or columns
#' @export
#'
#' @examples
#' X <- randomLHS(20, 7)
#' Y <- as.data.frame(X)
#' Y[,1] <- qnorm(X[,1], 2, 0.5)
#' Y[,2] <- qfactor(X[,2], factor(LETTERS[c(1,3,5,7,8)]))
#' Y[,3] <- qinteger(X[,3], 5, 17)
#' Y[,4:6] <- qdirichlet(X[,4:6], c(2,3,4))
#' Y[,7] <- qfactor(X[,7], ordered(LETTERS[c(1,3,5,7,8)]))
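#'
#' # per the Details, the Dirichlet-transformed columns are normalized, so
#' # each row of columns 4 through 6 should sum to 1
#' stopifnot(abs(rowSums(Y[,4:6]) - 1) < 1e-9)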
qfactor <- function(p, fact)
{
if (!is.factor(fact)) {
stop("fact must be a factor or ordered")
}
if (!is.numeric(p) | any(p < 0) | any(p > 1)) {
stop("p must be a numeric between 0 and 1")
}
nlev <- nlevels(fact)
cut(p, breaks = (0:nlev) / nlev, labels = levels(fact),
ordered_result = is.ordered(fact))
}
#' @rdname quanttrans
#'
#' @export
qinteger <- function(p, a, b)
{
if (!is.numeric(p) | any(p < 0) | any(p > 1)) {
stop("p must be a numeric between 0 and 1")
}
if (!is.integer(a) | !is.integer(b)) {
if (any(as.integer(a) != a) | any(as.integer(b) != b)) {
stop("a and b must be integers or numerics that do not require coersion to integers")
}
}
if (b < a) {
stop("b must be greater than a")
}
floor(p*(b - a + 1)) + a
}
#' @rdname quanttrans
#' @importFrom stats qgamma
#'
#' @export
qdirichlet <- function(X, alpha)
{
lena <- length(alpha)
if (!is.matrix(X) & !is.data.frame(X)) {
stop("X must be a matrix for qdirichlet")
}
sims <- dim(X)[1]
if (dim(X)[2] != lena) {
stop("the number of columns of X must be equal to the length of alpha in qdirichlet")
}
if(any(is.na(alpha)) || any(is.na(X))) {
stop("NA values not allowed in qdirichlet")
}
Y <- matrix(0, nrow=sims, ncol=lena)
ind <- which(alpha != 0)
for(i in ind)
{
Y[,i] <- stats::qgamma(X[,i], alpha[i], 1)
}
Y <- Y / rowSums(Y)
return(Y)
}
lhs/R/get_library_versions.R 0000644 0001762 0000144 00000000622 13754762734 015641 0 ustar ligges users # Copyright 2020 Robert Carnell
#' Get version information for all libraries in the lhs package
#'
#' @return a character string containing the versions
#' @export
#'
#' @importFrom utils packageVersion
#'
#' @examples
#' get_library_versions()
get_library_versions <- function()
{
return(paste(.Call("get_library_versions", PACKAGE = "lhs"), "lhs:", utils::packageVersion("lhs")))
}
lhs/R/create_oalhs.R 0000644 0001762 0000144 00000001476 13425055207 014030 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Create an orthogonal array Latin hypercube
#'
#' @param n the number of samples or rows in the LHS (integer)
#' @param k the number of parameters or columns in the LHS (integer)
#' @param bChooseLargerDesign should a larger oa design be chosen than the n and k requested?
#' @param bverbose should information be printed with execution
#'
#' @return a numeric matrix which is an orthogonal array Latin hypercube sample
#' @export
#'
#' @examples
#' set.seed(34)
#' A <- create_oalhs(9, 4, TRUE, FALSE)
#' B <- create_oalhs(9, 4, TRUE, FALSE)
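#'
#' # with bChooseLargerDesign = TRUE the returned design may be larger than the
#' # n and k requested, so inspect the dimensions actually delivered
#' dim(A)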
create_oalhs <- function(n, k, bChooseLargerDesign, bverbose)
{
return(.Call("create_oalhs", as.integer(n), as.integer(k),
as.logical(bChooseLargerDesign),
as.logical(bverbose), PACKAGE = "lhs"))
}
lhs/R/optSeededLHS.R 0000644 0001762 0000144 00000004342 13425060547 013660 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Optimum Seeded Latin Hypercube Sample
#'
#' Augments an existing Latin Hypercube Sample, adding points to the design, while
#' maintaining the \emph{latin} properties of the design. This function then uses the
#' columnwise pairwise (\acronym{CP}) algorithm to optimize the design. The original design is not necessarily maintained.
#'
#' @details
#' Augments an existing Latin Hypercube Sample, adding points to the design, while
#' maintaining the \emph{latin} properties of the design. This function then uses the
#' \acronym{CP} algorithm to optimize the design. The original design
#' is not necessarily maintained.
#'
#' @param seed An existing Latin Hypercube Sample matrix (the seed design) with values uniformly distributed on [0,1]
#' @param m The number of additional points to add to the seed matrix \code{seed}. The default value is zero. If \code{m} is zero then the seed design is optimized.
#' @param maxSweeps The maximum number of times the CP algorithm is applied to all the columns.
#' @param eps The optimal stopping criterion
#' @param verbose Print informational messages
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values uniformly distributed on [0,1]
#' @export
#' @seealso [randomLHS()], [geneticLHS()], [improvedLHS()], [maximinLHS()], and
#' [optimumLHS()] to generate Latin Hypercube Samples. [optAugmentLHS()] and
#' [augmentLHS()] to modify and augment existing designs.
#' @keywords design
#'
#' @references
#' Stein, M. (1987) Large Sample Properties of Simulations Using Latin Hypercube Sampling.
#' \emph{Technometrics}. \bold{29}, 143--151.
#'
#' @examples
#' set.seed(1234)
#' a <- randomLHS(4,3)
#' b <- optSeededLHS(a, 2, 2, .1)
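#'
#' # with m = 0 the seed design itself is optimized (per the m parameter description)
#' a_opt <- optSeededLHS(a, m = 0)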
optSeededLHS <- function(seed, m=0, maxSweeps=2, eps=.1, verbose=FALSE)
{
k <- ncol(seed)
if (!is.numeric(m) || is.na(m) || !is.finite(m) || m < 0)
stop("m must be a positive number")
if (m == 0)
{
N <- nrow(seed)
Pold <- seed
}
else
{
N <- m + nrow(seed)
Pold <- augmentLHS(seed, m)
}
result <- .Call("optSeededLHS_cpp", as.integer(N), as.integer(k),
as.integer(maxSweeps), eps, Pold, as.logical(verbose),
PACKAGE = "lhs")
return(result)
}
lhs/R/augmentLHS.R 0000644 0001762 0000144 00000007176 13425055135 013411 0 ustar ligges users # Copyright 2019 Robert Carnell
#' Augment a Latin Hypercube Design
#'
#' Augments an existing Latin Hypercube Sample, adding points to the design, while
#' maintaining the \emph{latin} properties of the design.
#'
#' @details Augments an existing Latin Hypercube Sample, adding points to the design, while
#' maintaining the \emph{latin} properties of the design. Augmentation is performed
#' in a random manner.
#'
#' The algorithm used by this function has the following steps.
#' First, create a new matrix to hold the candidate points after the design has
#' been re-partitioned into \eqn{(n+m)^{2}}{(n+m)^2} cells, where n is number of
#' points in the original \code{lhs} matrix. Then randomly sweep through each
#' column (1\ldots\code{k}) in the repartitioned design to find the missing cells.
#' For each column (variable), randomly search for an empty row, generate a
#' random value that fits in that row, record the value in the new matrix.
#' The new matrix can contain more filled cells than \code{m} unless \eqn{m = 2n},
#' in which case the new matrix will contain exactly \code{m} filled cells.
#' Finally, keep only the first m rows of the new matrix. It is guaranteed to
#' have \code{m} full rows in the new matrix. The deleted rows are partially full.
#' The additional candidate points are selected randomly due to the random search
#' for empty cells.
#'
#' @param lhs The Latin Hypercube Design to which points are to be added.
#' Contains an existing latin hypercube design with a number of rows equal
#' to the points in the design (simulations) and a number of columns equal
#' to the number of variables (parameters). The values of each cell must be
#' between 0 and 1 and uniformly distributed
#' @param m The number of additional points to add to matrix \code{lhs}
#'
#' @return An \code{n} by \code{k} Latin Hypercube Sample matrix with values
#' uniformly distributed on [0,1]
#' @export
#' @importFrom stats runif
#'
#' @author Rob Carnell
#' @references
#' Stein, M. (1987) Large Sample Properties of Simulations Using Latin
#' Hypercube Sampling. \emph{Technometrics}. \bold{29}, 143--151.
#'
#' @keywords design
#'
#' @seealso [randomLHS()], [geneticLHS()], [improvedLHS()], [maximinLHS()],
#' and [optimumLHS()] to generate Latin Hypercube Samples. [optAugmentLHS()]
#' and [optSeededLHS()] to modify and augment existing designs.
#'
#' @examples
#' set.seed(1234)
#' a <- randomLHS(4,3)
#' b <- augmentLHS(a, 2)
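#'
#' # a quick look at the re-partitioning described in the Details: the 6 points
#' # need not occupy all 6 of the new equal-probability bins
#' table(cut(b[,1], breaks = (0:6)/6))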
augmentLHS <- function(lhs, m=1)
{
if (!is.matrix(lhs))
stop("Input Latin Hypercube Design must be in the Matrix class\n")
if (length(m) != 1)
stop("m may not be a vector")
if (is.na(m) | is.infinite(m))
stop("m may not be infinite, NA, or NaN")
if (m != floor(m) | m < 1)
stop("m must be a positive integer\n")
if (any(is.na(lhs)))
stop("Input Design cannot contain any NA entries\n")
if (any(lhs < 0 | lhs > 1))
stop(paste("Input Latin Hypercube Design must have entries on the ",
"interval [0,1] which are uniformly distributed\n", sep = ""))
K <- ncol(lhs)
N <- nrow(lhs)
colvec <- order(runif(K))
rowvec <- order(runif(N + m))
B <- matrix(nrow = (N + m), ncol = K)
for (j in colvec)
{
newrow <- 0
for (i in rowvec)
{
if (!(any((i - 1)/(N + m) <= lhs[ ,j] & lhs[ ,j] <= i/(N + m)))) {
newrow <- newrow + 1
B[newrow, j] <- runif(1, (i - 1)/(N + m), i/(N + m))
}
}
}
if (is.matrix(B[1:m,]))
{
E <- rbind(lhs, B[1:m, ])
} else
{
E <- rbind(lhs, matrix(B[1:m,], nrow = m, ncol = K))
}
row.names(E) <- NULL
return(E)
}
lhs/NEWS 0000644 0001762 0000144 00000010413 14640332716 011544 0 ustar ligges users Changes in version 0.1 (2006-07-11)
- Initial release
Changes in version 0.2 (2006-07-21)
- Bug suggested by Bjarne Hansen fixed 7/20/06. Bug involved augmenting lhs
samples with one parameter. Example a <- randomLHS(4, 1); augmentLHS(a, 2).
- RUnit test added to check this bug. All RUnit tests satisfactory.
Changes in version 0.3 (2006-10-22)
- Another bug reported by Bjarne Hansen on 7/23/06, and fixed on 10/21/06.
The bug involved augmenting a hypercube with one point. RUnit test added
to check this bug. All RUnit tests satisfactory.
- Also added documentation consisting of an augmentation example.
- Added a lhs package help page.
Changes in version 0.4
- Changed the license to GPL >= 2 according to a Kurt Hornik email
Changes in version 0.5 (2009-01-26)
- Change output filenames to be portable
Changes in version 0.6
- Added a new option to randomLHS to allow for similar lhs's when
the seed is set and columns are added
Changes in version 0.7 (2012-03-27)
- Removed test directories to fix them for the proper package structure.
Changes in version 0.8 (2012-07-11)
- Refactored the underlying C code into C++ to add range checks
for internal arrays. Corrected a bug suggested by XXXX on DATE.
Corrected the bug suggested by Prof Ripley on DATE with the
range checking.
Changes in version 0.9
- Removed non-portable code introduced in Version 0.8
Changes in version 0.10 (2012-07-13)
- Changed static template method definitions to be included in the
definition of the utilityLHS class, instead of in the header below the class.
Changes in version 0.11
- Fixed a bug in the geneticLHS code and added Maximin to the optimization criteria.
- Fixed a bug caused by a change in R 2.3.3 that requires a matrix's dimnames to be a list. Added an importFrom to
the namespace.
Changes in version 0.12 (2016-01-15)
- Fixed a bug in the Description file.
Changes in version 0.13 (2016-01-18)
- Numerical accuracy checks on the Solaris 10 systems were failing. Unable to reproduce this error easily,
but it is limited to the numerical accuracy tests. Eliminated the numerical tests for Solaris/Sun/Sparc.
Changes in version 0.14 (2016-08-09)
- Fixed a bug suggested by Roland Lowe on 8/4/2016.
Changes in version 0.15 (2017-12-20)
- Added registration of native routines.
Changes in version 0.16 (2018-01-04)
- Removed the file output of test files which were causing errors on CRAN.
changed the version dependency to >= 3.3.0. (>=3.4.0 caused errors on CRAN)
Changes in version 1.0 (2019-01-31)
- Major revision.
- Changed all underlying C code to C++ using Rcpp.
- Added orthogonal array latin hypercube capability
Changes in version 1.0.1 (2019-02-03)
- Update to fix a memory leak noticed on CRAN servers when checking examples with valgrind
Changes in version 1.0.2 (2020-04-13)
- Added references to the Orthogonal array functions
- Fixed typos
- Changed the way the Rcpp::RNGScope object is destructed based on the debugging efforts of @mb706
Changes in version 1.1.0 (2020-09-29)
- Bug reports from Dr. Ulrike Groemping (https://github.com/bertcarnell/lhs/issues/26, https://github.com/bertcarnell/lhs/issues/25)
- Fixed underlying C++ code for addelkempn orthogonal array algorithm.
- Fixed underlying C++ for bosebushl orthogonal array algorithm.
- Added regression tests
Changes in version 1.1.1 (2020-10-05)
- Corrected memory leak and read out-of-bounds
Changes in version 1.1.2 (2021-09-07)
- Exposed functions related to Galois fields from the oa C++ library
Changes in version 1.1.3 (2021-09-08)
- Corrected compilation flag issue on Solaris
Changes in version 1.1.4 (2022-02-20)
- Correct error in logical statement with length > 1
Changes in version 1.1.5 (2022-03-22)
- std::iterator, std::binary_function, and std::unary_function were removed since they are deprecated in C++17 and later
Changes in version 1.1.6 (2022-12-17)
- Bug fix for a warning on CRAN "init.c:14:33: warning: a function declaration without a prototype is deprecated in all versions of C [-Wstrict-prototypes]"
Changes in version 1.1.7 (2023-06-30)
- Added correlated LHS samples and new quantile functions
Changes in version 1.2.0 (2023-06-30)
- Updates for CRAN checks
lhs/vignettes/ 0000755 0001762 0000144 00000000000 14640352504 013053 5 ustar ligges users lhs/vignettes/correlated_lhs.Rmd 0000644 0001762 0000144 00000012024 14635433106 016512 0 ustar ligges users ---
title: "Examples of Correlated and Multivariate Latin hypercubes"
author: "Rob Carnell"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Examples of Correlated and Multivariate Latin hypercubes}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
%\VignetteAuthor{Rob Carnell}
%\VignetteKeyword{lhs}
%\VignetteKeyword{latin hypercube}
%\VignetteKeyword{correlated}
%\VignetteKeyword{multivariate}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
source("VignetteCommonCode.R")
require(lhs)
```
Generally, Latin hypercubes are drawn in a way that makes the marginal distributions
for the uniform Latin hypercube and transformed Latin hypercube independent. In
some cases researchers want to use Latin hypercubes to study problem areas where
the marginal distributions are correlated or come from a multivariate distribution.
There are a variety of methods to create such a Latin hypercube. The method used
in this package is to draw and transform an uncorrelated hypercube and then
use the columnwise-pairwise algorithm to adjust to a set of pre-defined conditions.
A set of pre-defined conditions is not always achievable.
## Example 1: Simple Correlation
Assumptions:
- $X_1 \sim uniform(2, 4)$
- $X_2 \sim Normal(1, 3)$
- $X_3 \sim Exponential(3)$
- $X_4 \sim LogNormal(1, 1)$
- $cor(x_1, x_2) = 0.3$
- $cor(x_3, x_4) = 0.5$
```{r}
lhs_A <- correlatedLHS(lhs::randomLHS(30, 4),
marginal_transform_function = function(W, ...) {
W[,1] <- qunif(W[,1], 2, 4)
W[,2] <- qnorm(W[,2], 1, 3)
W[,3] <- qexp(W[,3], 3)
W[,4] <- qlnorm(W[,4], 1, 1)
return(W)
},
cost_function = function(W, ...) {
(cor(W[,1], W[,2]) - 0.3)^2 + (cor(W[,3], W[,4]) - 0.5)^2
},
debug = FALSE, maxiter = 1000)
```
Check that the desired correlations were created:
```{r}
cor(lhs_A$transformed_lhs[,1:2])[1,2]
cor(lhs_A$transformed_lhs[,3:4])[1,2]
```
## Example 2: Dirichlet distribution
Assume that we want $X$ to be Dirichlet distributed with $\alpha = 4, 3, 2, 1$.
Therefore the margins of the Dirichlet are:
- $X_1 \sim beta(4, 10-4)$
- $X_2 \sim beta(3, 10-3)$
- $X_3 \sim beta(2, 10-2)$
- $X_4 \sim beta(1, 10-1)$
### Method 1: `correlatedLHS`
```{r}
lhs_B <- correlatedLHS(lhs::randomLHS(30, 4),
marginal_transform_function = function(W, ...) {
W[,1] <- qbeta(W[,1], 4, 6)
W[,2] <- qbeta(W[,2], 3, 7)
W[,3] <- qbeta(W[,3], 2, 8)
W[,4] <- qbeta(W[,4], 1, 9)
return(W)
},
cost_function = function(W, ...) {
sum((apply(W, 1, sum) - 1)^2)
},
debug = FALSE,
maxiter = 1000)
```
Check properties
```{r}
range(apply(lhs_B$transformed_lhs, 1, sum)) # close to 1
apply(lhs_B$transformed_lhs, 2, mean) # close to 4/10, 3/10, 2/10, 1/10
```
### Method 2: `qdirichlet`
```{r}
lhs_B <- lhs::qdirichlet(lhs::randomLHS(30, 4), c(4,3,2,1))
```
Check properties
```{r}
all(abs(apply(lhs_B, 1, sum) - 1) < 1E-9) # all exactly 1
apply(lhs_B, 2, mean) # close to 4/10, 3/10, 2/10, 1/10
```
## Example 3: Rejection Sample
Assumptions:
- $X_1 \sim uniform(1, 4)$
- $X_2 \sim uniform(10^{-6}, 2)$
- $X_3 \sim uniform(2, 6)$
- $X_4 \sim uniform(10^{-6}, 0.1)$
- $lower < \prod_{i=1}^4 X_i < upper$
First build an empirical sample using rejection sampling
```{r}
set.seed(3803)
N <- 100000
reject_samp <- data.frame(
v1 = runif(N, 1, 4),
v2 = runif(N, 1E-6, 2),
v3 = runif(N, 2, 6),
v4 = runif(N, 1E-6, 0.1)
)
p <- with(reject_samp, v1*v2*v3*v4)
ind <- which(p < 1 & p > 0.3)
reject_samp <- reject_samp[ind,]
```
Now build the correlated sample using the rejection sample as an empirical
distribution function and the boundaries as a cost function.
```{r}
lhs_C <- correlatedLHS(lhs::randomLHS(30, 4),
marginal_transform_function = function(W, empirical_sample, ...) {
res <- W
for (i in 1:ncol(W)) {
res[,i] <- quantile(empirical_sample[,i], probs = W[,i])
}
return(res)
},
cost_function = function(W, ...) {
p <- W[,1]*W[,2]*W[,3]*W[,4]
pp <- length(which(p > 0.3 & p < 1)) / nrow(W)
return(1-pp)
},
debug = FALSE,
maxiter = 10000,
empirical_sample = reject_samp)
```
lhs/vignettes/augment_lhs.Rmd 0000644 0001762 0000144 00000014427 13416532121 016030 0 ustar ligges users ---
title: "An Example of Augmenting a Latin Hypercube"
author: "Rob Carnell"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{An Example of Augmenting a Latin Hypercube}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
%\VignetteAuthor{Rob Carnell}
%\VignetteKeyword{lhs}
%\VignetteKeyword{latin hypercube}
%\VignetteKeyword{augment}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
source("VignetteCommonCode.R")
require(lhs)
graph2DaugmentLHS1 <- function(sims, extras)
{
A <- randomLHS(sims, 2)
B <- augmentLHS(A, extras)
plot.default(A[,1], A[,2], type = "n", ylim = c(0,1),
xlim = c(0,1), xlab = "x1", ylab = "x2", xaxs = "i", yaxs = "i", main = "")
for (i in 1:length(A[,1]))
{
rect(floor(A[i,1]*sims)/sims, floor(A[i,2]*sims)/sims,
ceiling(A[i,1]*sims)/sims, ceiling(A[i,2]*sims)/sims, col = "grey")
}
points(A[,1], A[,2], pch = 19, col = "red")
abline(v = (0:sims)/sims, h = (0:sims)/sims)
return(list(A = A, B = B, sims = sims, extras = extras))
}
graph2DaugmentLHS2 <- function(X)
{
A <- X$A
B <- X$B
sims <- X$sims
extras <- X$extras
plot.default(A[,1], A[,2], type = "n", ylim = c(0,1),
xlim = c(0,1), xlab = "x1", ylab = "x2", xaxs = "i", yaxs = "i", main = "")
N <- sims + extras
for (i in 1:length(B[,1]))
{
rect(floor(B[i,1]*N)/N, floor(B[i,2]*N)/N,
ceiling(B[i,1]*N)/N, ceiling(B[i,2]*N)/N, col = "grey")
}
points(A[,1], A[,2], pch = 19, col = "red")
points(B[((sims + 1):(sims + extras)), 1], B[((sims + 1):(sims + extras)), 2],
pch = 19, col = "blue")
abline(v = (0:N)/N, h = (0:N)/N)
}
# X <- graph2DaugmentLHS1(5,5)
# graph2DaugmentLHS2(X)
```
Suppose that a computer simulation study is being designed that requires
expensive runs. A Latin hypercube design is desired for this simulation so
that the expectation of the simulation output can be estimated efficiently
given the distributions of the input variables. Latin hypercubes are most
often used in high-dimensional problems, but the example shown is of small
dimension. Suppose further that the total extent of funding is uncertain.
Enough money is available for 5 runs, and there is a chance that there will be
enough for 5 more. However, if the money for the additional 5 runs does not
materialize, then the first 5 runs must be a Latin hypercube alone. A design
for this situation can be created using the `lhs` package.
First create a random Latin hypercube using the `randomLHS(n, k)` command:
```{r randomlhs}
A <- randomLHS(5,2)
```
An example of this hypercube is shown in `r registerFigure("X")`. Note that
the *Latin* property of the hypercube requires that each of the 5 equal
probability intervals be filled (i.e. each row and each column is filled with
one point). Also notice that the exact location of the design point is randomly
sampled from within that cell using a uniform distribution for each marginal
variable.
-----
`r addFigureCaption("X", "A randomly produced Latin Hypercube with uniform marginal distributions for 2 parameters with 5 simulations", register=FALSE)`
```{r original5, echo=FALSE, fig.align='center', fig.height=5, fig.width=5}
set.seed(10)
X <- graph2DaugmentLHS1(5, 5)
```
-----
Next, in order to augment the design with more points use `augmentLHS(lhs, m)`. The following will add 5 more points to the design:
```{r augment5}
B <- augmentLHS(A, 5)
```
The `augmentLHS` function works by re-dividing the original design into
`n+m` intervals (e.g. 5+5=10) keeping the original design points exactly in the
same position. It then randomly fills the empty row-column sets. The results
are shown in `r registerFigure("Y")`.
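As a quick numerical check of this re-partitioning (a sketch using the `A` and
`B` objects created above), we can count how many of the 10 design points fall
in each of the 10 equal intervals of the first margin:
```{r binCheck}
# 5 original + 5 augmented points give 10 equal-probability intervals on (0,1);
# count how many of the 10 points land in each interval of the first column
table(cut(B[,1], breaks = (0:10)/10))
```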
-----
`r addFigureCaption("Y", "A randomly produced Latin Hypercube of 5 points (red) with 5 augmented points (blue). Each parameter has a uniform marginal distribution.", register=FALSE)`
```{r augmented10, fig.align='center', echo=FALSE, fig.height=5, fig.width=5}
graph2DaugmentLHS2(X)
```
-----
The `augmentLHS` function uses the following algorithm (see the documentation for `augmentLHS`):
* Create a new `(n+m)` by `k` matrix to hold the candidate points after
the design has been re-partitioned into `(n+m)^2` cells, where `n` is number
of points in the original `lhs` matrix.
* Then randomly sweep through each
column (1...`k`) in the repartitioned design to find the missing cells.
* For each column (variable), randomly search for an empty row, generate a
random value that fits in that row, record the value in the new matrix.
The new matrix can contain more than `m` points unless `m = 2n`,
in which case the new matrix will contain exactly `m` filled rows.
* Finally, keep only the first `m` rows of the new matrix. It is
guaranteed that there will be `m` full rows (points) in the new matrix. The
deleted rows are partially full. The additional candidate points are selected
randomly because of the random search used to find empty cells.
Also notice that because the original points are randomly placed within the
cells, depending on how you bin the marginal distributions, a histogram (of x1
for example) will not necessarily be exactly uniform.
Now, the augmenting points do not necessarily form a Latin Hypercube themselves.
The original design and augmenting points may form a Latin Hypercube, or there
may be more than one point per row in the augmented design. If the augmented
points are equal to the number of original points, then a strictly uniform
Latin hypercube is guaranteed. An example of an augmented design which is not
uniform in the marginal distributions is given in `r registerFigure("Z")` and `r registerFigure("W")`.
The commands were:
```{r random_and_augment}
A <- randomLHS(7, 2)
B <- augmentLHS(A, 3)
```
-----
`r addFigureCaption("Z", "Original design with 7 points", register=FALSE)`
```{r Z, echo=FALSE, fig.align='center', fig.height=5, fig.width=5}
set.seed(12)
X <- graph2DaugmentLHS1(7, 3)
```
-----
`r addFigureCaption("W", "Augmented design with 3 additional points. Note that row 9 has 2 points and row 3 has none.", register=FALSE)`
```{r W, echo=FALSE, fig.align='center', fig.height=5, fig.width=5}
graph2DaugmentLHS2(X)
```
lhs/vignettes/lhs_basics.Rmd 0000644 0001762 0000144 00000017170 14204306507 015635 0 ustar ligges users ---
title: "Basic Latin hypercube samples and designs with package lhs"
author: "Rob Carnell"
date: "`r Sys.Date()`"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Basic Latin hypercube samples and designs with package lhs}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
%\VignetteAuthor{Rob Carnell}
%\VignetteKeyword{lhs}
%\VignetteKeyword{latin hypercube}
---
```{r setup, include = FALSE}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>"
)
require(lhs)
source("VignetteCommonCode.R")
graph2dLHS <- function(Alhs)
{
stopifnot(ncol(Alhs) == 2)
sims <- nrow(Alhs)
par(mar = c(4,4,2,2))
plot.default(Alhs[,1], Alhs[,2], type = "n", ylim = c(0,1),
xlim = c(0,1), xlab = "Parameter 1", ylab = "Parameter 2", xaxs = "i",
yaxs = "i", main = "")
for (i in 1:nrow(Alhs))
{
rect(floor(Alhs[i,1]*sims)/sims, floor(Alhs[i,2]*sims)/sims,
ceiling(Alhs[i,1]*sims)/sims, ceiling(Alhs[i,2]*sims)/sims, col = "grey")
}
points(Alhs[,1], Alhs[,2], pch = 19, col = "red")
abline(v = (0:sims)/sims, h = (0:sims)/sims)
}
# transform is a function that takes a number, e.g.:
# transform <- function(x){return(qnorm(x, mean = 0, sd = 1))}
graph2dLHSTransform <- function(Alhs, transform1, transform2, min1, max1, min2, max2)
{
stopifnot(ncol(Alhs) == 2)
stopifnot(all(Alhs[,1] <= max1 & Alhs[,1] >= min1))
stopifnot(all(Alhs[,2] <= max2 & Alhs[,2] >= min2))
sims <- nrow(Alhs)
breaks <- seq(0,1,length = sims + 1)[2:(sims)]
breaksTransformed1 <- sapply(breaks, transform1)
breaksTransformed2 <- sapply(breaks, transform2)
par(mar = c(4,4,2,2))
plot.default(Alhs[,1], Alhs[,2], type = "n",
ylim = c(min2, max2),
xlim = c(min1, max1),
xlab = "Parameter 1", ylab = "Parameter 2",
xaxs = "i", yaxs = "i", main = "")
for (si in 1:sims)
{
temp <- Alhs[si,]
for (i in 1:sims)
{
if ((i == 1 && min1 <= temp[1] && breaksTransformed1[i] >= temp[1]) ||
(i == sims && max1 >= temp[1] && breaksTransformed1[i - 1] <= temp[1]) ||
(breaksTransformed1[i - 1] <= temp[1] && breaksTransformed1[i] >= temp[1]))
{
for (j in 1:sims)
{
if ((j == 1 && min2 <= temp[2] && breaksTransformed2[j] >= temp[2]) ||
(j == sims && max2 >= temp[2] && breaksTransformed2[j - 1] <= temp[2]) ||
(breaksTransformed2[j - 1] <= temp[2] && breaksTransformed2[j] >= temp[2]))
{
if (i == 1)
{
xbot <- min1
xtop <- breaksTransformed1[i]
} else if (i == sims)
{
xbot <- breaksTransformed1[i - 1]
xtop <- max1
} else
{
xbot <- breaksTransformed1[i - 1]
xtop <- breaksTransformed1[i]
}
if (j == 1)
{
ybot <- min2
ytop <- breaksTransformed2[j]
} else if (j == sims)
{
ybot <- breaksTransformed2[j - 1]
ytop <- max2
} else
{
ybot <- breaksTransformed2[j - 1]
ytop <- breaksTransformed2[j]
}
rect(xbot, ybot, xtop, ytop, col = "grey")
}
}
}
}
}
points(Alhs[,1], Alhs[,2], pch = 19, col = "red")
abline(v = breaksTransformed1, h = breaksTransformed2)
}
#set.seed(1111)
#A <- randomLHS(5,4)
#f <- function(x){qnorm(x)}
#g <- function(x){qlnorm(x, meanlog=0.5, sdlog=1)}
#B <- A
#B[,1] <- f(A[,1])
#B[,2] <- g(A[,2])
#graph2dLHSTransform(B[,1:2], f, g, -4, 4, 0, 8)
#f <- function(x){qunif(x, 3, 5)}
#B <- apply(A, 2, f)
#graph2dLHSTransform(B[,1:2], f)
```
### Theory of Latin Hypercube Sampling
For the technical basis of Latin Hypercube Sampling (LHS) and Latin Hypercube Designs (LHD) please see:
* Stein, Michael. _Large Sample Properties of Simulations Using Latin Hypercube Sampling._ Technometrics, Vol 29, No 2, 1987.
* McKay, M.D., Beckman, R.J., and Conover, W.J. _A Comparison of Three Methods for Selecting Values of Input Variables in the Analysis of Output from a Computer Code._ Technometrics, Vol 21, No 2, 1979.
This package was created to bring these designs to R and to implement the optimized
sampling methods described in many of the articles that followed.
### Create a Simple LHS
A basic LHS is created using `randomLHS`.
```{r block1}
# set the seed for reproducibility
set.seed(1111)
# a design with 5 samples from 4 parameters
A <- randomLHS(5, 4)
A
```
In general, the LHS is uniform on the margins until transformed (`r registerFigure("X")`):
`r addFigureCaption("X", "Two dimensions of a Uniform random LHS with 5 samples", register=FALSE)`
```{r figureX, fig.align='center', fig.height=5, fig.width=5, echo=FALSE}
graph2dLHS(A[,1:2])
```
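The property illustrated in the figure can also be checked numerically. The chunk
below is an added illustration (the label `check_margins` is not in the original
vignette); each margin of `A` should have exactly one point in each of the
`nrow(A)` equal-width bins of (0,1).
```{r check_margins}
# each count should be exactly 1
apply(A, 2, function(y) tabulate(findInterval(y, seq(0, 1, length.out = nrow(A) + 1)), nbins = nrow(A)))
```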
It is common to transform the margins of the design (the columns) into other
distributions using their quantile functions (`r registerFigure("Y")`):
```{r block 3}
B <- matrix(nrow = nrow(A), ncol = ncol(A))
B[,1] <- qnorm(A[,1], mean = 0, sd = 1)
B[,2] <- qlnorm(A[,2], meanlog = 0.5, sdlog = 1)
B[,3] <- A[,3]
B[,4] <- qunif(A[,4], min = 7, max = 10)
B
```
`r addFigureCaption("Y", "Two dimensions of a transformed random LHS with 5 samples", register=FALSE)`
```{r figureY, fig.align='center', fig.height=5, fig.width=5, echo=FALSE}
f <- function(x){qnorm(x)}
g <- function(x){qlnorm(x, meanlog = 0.5, sdlog = 1)}
graph2dLHSTransform(B[,1:2], f, g, -4, 4, 0, 8)
```
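Because each quantile function used above is strictly increasing, the transformation
preserves the ordering within every column, which is why `B` keeps the Latin structure
of `A`. A quick check is shown below (an added illustration; the label `check_ranks`
is not in the original vignette).
```{r check_ranks}
# ranks are unchanged by strictly increasing transformations, so this should be TRUE
all(apply(A, 2, rank) == apply(B, 2, rank))
```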
### Optimizing the Design
The LHS can be optimized using a number of methods in the `lhs` package. Each
method attempts to improve on the random design by ensuring that the selected
points are as uncorrelated and as space-filling as possible. `r registerTable("tab1")` shows
some summary metrics. `r registerFigure("Z")`, `r registerFigure("W")`, and `r registerFigure("G")`
show the corresponding plots.
```{r block 4}
set.seed(101)
A <- randomLHS(30, 10)
A1 <- optimumLHS(30, 10, maxSweeps = 4, eps = 0.01)
A2 <- maximinLHS(30, 10, dup = 5)
A3 <- improvedLHS(30, 10, dup = 5)
A4 <- geneticLHS(30, 10, pop = 1000, gen = 8, pMut = 0.1, criterium = "S")
A5 <- geneticLHS(30, 10, pop = 1000, gen = 8, pMut = 0.1, criterium = "Maximin")
```
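The table entries below are computed inline. The same quantities can be collected
with a small helper like the one below (an added sketch; `designMetrics` and the
label `check_metrics` are not part of the package or the original vignette).
```{r check_metrics}
# summarize the space-filling and correlation criteria for a design X
designMetrics <- function(X) {
  c(minDist = min(dist(X)),
    meanDist = mean(dist(X)),
    maxAbsCor = max(abs(cor(X) - diag(ncol(X)))))
}
designMetrics(A1)
```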
-----
`r addTableCaption("tab1", "Sample results and metrics of various LHS algorithms", register=FALSE)`
Method | Min Distance between Points | Mean Distance between Points | Max Absolute Correlation between Variables
:-----|:-----:|:-----:|:-----:
randomLHS | `r min(dist(A))` | `r mean(dist(A))` | `r max(abs(cor(A)-diag(10)))`
optimumLHS | `r min(dist(A1))` | `r mean(dist(A1))` | `r max(abs(cor(A1)-diag(10)))`
maximinLHS | `r min(dist(A2))` | `r mean(dist(A2))` | `r max(abs(cor(A2)-diag(10)))`
improvedLHS | `r min(dist(A3))` | `r mean(dist(A3))` | `r max(abs(cor(A3)-diag(10)))`
geneticLHS (S) | `r min(dist(A4))` | `r mean(dist(A4))` | `r max(abs(cor(A4)-diag(10)))`
geneticLHS (Maximin) | `r min(dist(A5))` | `r mean(dist(A5))` | `r max(abs(cor(A5)-diag(10)))`
-----
`r addFigureCaption("Z", "Pairwise margins of a randomLHS", register=FALSE)`
```{r Z, fig.align='center', fig.height=7, fig.width=7, echo=FALSE}
pairs(A, pch = 19, col = "blue", cex = 0.5)
```
-----
`r addFigureCaption("W", "Pairwise margins of a optimumLHS", register=FALSE)`
```{r W, fig.align='center', fig.height=7, fig.width=7, echo=FALSE}
pairs(A1, pch = 19, col = "blue", cex = 0.5)
```
-----
`r addFigureCaption("G", "Pairwise margins of a maximinLHS", register=FALSE)`
```{r G, fig.align='center', fig.height=7, fig.width=7, echo=FALSE}
pairs(A2, pch = 19, col = "blue", cex = 0.5)
```
lhs/vignettes/VignetteCommonCode.R 0000644 0001762 0000144 00000007424 13413303401 016723 0 ustar ligges users # short set of utilities to handle figure and Table naming in .Rmd files
numEnv <- new.env()
assign("figureList", list(), envir=numEnv)
assign("tableList", list(), envir=numEnv)
##################################
registerObject <- function(idName, objectListName)
{
# get the figure list since we can't just add a list element using assign
objectListLocal <- get(objectListName, envir=numEnv)
# if the idName is already used, error
if (!is.null(eval(parse(text=paste("objectListLocal$", idName, sep="")))))
{
stop(paste("idName already used prior to registerObject", idName))
}
# the new number is the old number of figures plus one
num <- length(objectListLocal) + 1
# assign to the local figure list
eval(parse(text=paste("objectListLocal$", idName, " <- ", num, sep="")))
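# (equivalent to objectListLocal[[idName]] <- num)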
# put the local figure list in the one contained in the environment
assign(objectListName, objectListLocal, pos=numEnv)
return(getObjectLink(idName, objectListName))
}
registerTable <- function(idName)
{
registerObject(idName, "tableList")
}
registerFigure <- function(idName)
{
registerObject(idName, "figureList")
}
getObjectCaption <- function(idName, objectListName)
{
num <- getObjectNum(idName, objectListName)
if (objectListName == "figureList")
{
return(paste("Figure", num))
} else if (objectListName == "tableList")
{
return(paste("Table", num))
} else
{
stop(paste("objectListName:", objectListName, "not recognized"))
}
}
getTableCaption <- function(idName)
{
getObjectCaption(idName, "tableList")
}
getFigureCaption <- function(idName)
{
getObjectCaption(idName, "figureList")
}
getObjectLink <- function(idName, objectListName)
{
num <- getObjectNum(idName, objectListName)
if (objectListName == "figureList")
{
return(paste("Figure ", num, "", sep=""))
} else if (objectListName == "tableList")
{
return(paste("Table ", num, "", sep=""))
} else
{
stop(paste("objectListName:", objectListName, "not recognized"))
}
}
getTableLink <- function(idName)
{
return(getObjectLink(idName, "tableList"))
}
getFigureLink <- function(idName)
{
getObjectLink(idName, "figureList")
}
getObjectNum <- function(idName, objectListName)
{
objectListLocal <- get(objectListName, envir=numEnv)
num <- eval(parse(text=paste("objectListLocal$", idName, sep="")))
if (is.null(num))
{
stop(paste("idName is not registered for", idName, "in", objectListName))
}
return(num)
}
getTableNum <- function(idName)
{
return(getObjectNum(idName, "tableList"))
}
getFigureNum <- function(idName)
{
getObjectNum(idName, "figureList")
}
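# Typical usage inside the vignettes (illustrative sketch; not executed here):
#   `r registerFigure("X")`                    assigns the next figure number and returns "Figure 1"
#   `r getFigureLink("X")`                     returns "Figure 1" for later cross-references
#   `r addFigureCaption("X", "caption text")`  returns the numbered caption placed under the figure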
addTableCaption <- function(idName, caption, register=FALSE)
{
cap <- ifelse(register, registerTable(idName), getTableCaption(idName))
paste("
/**
 * These programs construct and manipulate orthogonal arrays. They were prepared by
 *
 * - Art Owen
 * - Department of Statistics
 * - Sequoia Hall
 * - Stanford CA 94305
 *
 * They may be freely used and shared. This code comes
 * with no warranty of any kind. Use it at your own
 * risk.
 *
 * I thank the Semiconductor Research Corporation and
 * the National Science Foundation for supporting this
 * work.
 **/

#include "runif.h"

namespace oacpp
{
    RUnif::RUnif()
        : RUnif(1, 2, 3, 4)
    {
    }

    RUnif::RUnif(int is, int js, int ks, int ls)
    {
        m_jent = m_i = m_j = m_k = m_l = ip = jp = 0;
        c = cd = cm = 0.0;
        RUnif::seed(is, js, ks, ls);
    }

    void RUnif::seed(SeedSet & seedSet)
    {
        RUnif::seed(seedSet.is, seedSet.js, seedSet.ks, seedSet.ls);
    }

    int RUnif::mod(int a, int b)
    {
        int ans;
        ans = a % b;
        if (ans >= 0)
        {
            return ans;
        }
        return ans + b;
    }

    int RUnif::seedok(int is, int js, int ks, int ls)
    {
        if (is == 1 && js == 1 && ks == 1 && ls == 1)
        {
            return SEEDBAD;
        }
        if (is < 1 || js < 1 || ks < 1 || ls < 1)
        {
            return SEEDBAD;
        }
        if (is > 168 || js > 168 || ks > 168 || ls > 168)
        {
            return SEEDBAD;
        }
        return SEEDOK;
    }

    void RUnif::seed(int is, int js, int ks, int ls)
    {
        m_jent = 0;
        if (seedok(is, js, ks, ls) == SEEDOK)
        {
            m_i = is;
            m_j = js;
            m_k = ks;
            m_l = ls;
        }
        else
        {
            std::ostringstream msg;
            msg << "Error: Invalid seed " << is << " " << js << " " << ks << " " << ls << "\n";
            msg << "Must be four integers between 1 and 168, and\n";
            msg << "must not all be 1.\n";
            ostringstream_runtime_error(msg);
        }
    }

    SeedSet RUnif::getSeedSet()
    {
        SeedSet s = SeedSet();
        s.is = m_i;
        s.js = m_j;
        s.ks = m_k;
        s.ls = m_l;
        return s;
    }

    void RUnif::runif(std::vector
/**
 * matrix class
 * @tparam T the type of object stored in the matrix
 * @tparam ISROWWISE a boolean to indicate if the matrix is iterated row-wise
 */
template

/**
 * matrix class
 * @tparam T the type of object stored in the matrix
 * @tparam ISROWWISE a boolean to indicate if the matrix is iterated row-wise
 */
template
/**
 * These programs construct and manipulate orthogonal arrays. They were prepared by
 *
 * - Art Owen
 * - Department of Statistics
 * - Sequoia Hall
 * - Stanford CA 94305
 *
 * They may be freely used and shared. This code comes
 * with no warranty of any kind. Use it at your own
 * risk.
 *
 * I thank the Semiconductor Research Corporation and
 * the National Science Foundation for supporting this
 * work.
 **/

#include "primes.h"

namespace oacpp
{
    namespace primes
    {
        int isprime_old(int p) // LCOV_EXCL_START
        {
            if (p < 2)
            {
                return ISPRIMEFALSE;
            }
            /* This is not the fastest, but it is likely to take negligible time
               compared to that used in constructing the Galois field or the
               experimental design */
            double maxDivisor = sqrt(static_cast